diff --git a/pyproject.toml b/pyproject.toml index e7789e0..b4f9217 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gooeyai" -version = "0.0.1-beta7" +version = "0.0.1-beta8" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index f45e317..994d108 100644 --- a/reference.md +++ b/reference.md @@ -5110,6228 +5110,6 @@ client.health_status_get() - - - - -
client.post_v3chyron_plant_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3chyron_plant_async_form( - midi_notes="midi_notes", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**midi_notes:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**midi_notes_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**chyron_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3compare_llm_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3compare_llm_async_form() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.List[PostV3CompareLlmAsyncFormRequestSelectedModelsItem]]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3CompareLlmAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3compare_text2img_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3compare_text2img_async_form( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**dall_e3quality:** `typing.Optional[str]` - -
-
- -
-
- -**dall_e3style:** `typing.Optional[str]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**selected_models:** `typing.Optional[ - typing.List[PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem] -]` - -
-
- -
-
- -**scheduler:** `typing.Optional[PostV3CompareText2ImgAsyncFormRequestScheduler]` - -
-
- -
-
- -**edit_instruction:** `typing.Optional[str]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3deforum_sd_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import AnimationPrompt, Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3deforum_sd_async_form( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**animation_prompts:** `typing.List[AnimationPrompt]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**max_frames:** `typing.Optional[int]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3DeforumSdAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**animation_mode:** `typing.Optional[str]` - -
-
- -
-
- -**zoom:** `typing.Optional[str]` - -
-
- -
-
- -**translation_x:** `typing.Optional[str]` - -
-
- -
-
- -**translation_y:** `typing.Optional[str]` - -
-
- -
-
- -**rotation3d_x:** `typing.Optional[str]` - -
-
- -
-
- -**rotation3d_y:** `typing.Optional[str]` - -
-
- -
-
- -**rotation3d_z:** `typing.Optional[str]` - -
-
- -
-
- -**fps:** `typing.Optional[int]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3email_face_inpainting_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3email_face_inpainting_async_form( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**email_address:** `typing.Optional[str]` - -
-
- -
-
- -**twitter_handle:** `typing.Optional[str]` - -
-
- -
-
- -**face_scale:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**upscale_factor:** `typing.Optional[float]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**should_send_email:** `typing.Optional[bool]` - -
-
- -
-
- -**email_from:** `typing.Optional[str]` - -
-
- -
-
- -**email_cc:** `typing.Optional[str]` - -
-
- -
-
- -**email_bcc:** `typing.Optional[str]` - -
-
- -
-
- -**email_subject:** `typing.Optional[str]` - -
-
- -
-
- -**email_body:** `typing.Optional[str]` - -
-
- -
-
- -**email_body_enable_html:** `typing.Optional[bool]` - -
-
- -
-
- -**fallback_email_body:** `typing.Optional[str]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3face_inpainting_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3face_inpainting_async_form( - input_image="input_image", - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**face_scale:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3FaceInpaintingAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**upscale_factor:** `typing.Optional[float]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3google_image_gen_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3google_image_gen_async_form( - search_query="search_query", - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
- -
-
- -**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3GoogleImageGenAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**prompt_strength:** `typing.Optional[float]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3image_segmentation_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3image_segmentation_async_form( - input_image="input_image", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3ImageSegmentationAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**mask_threshold:** `typing.Optional[float]` - -
-
- -
-
- -**rect_persepective_transform:** `typing.Optional[bool]` - -
-
- -
-
- -**reflection_opacity:** `typing.Optional[float]` - -
-
- -
-
- -**obj_scale:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3img2img_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3img2img_async_form( - input_image="input_image", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**text_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**selected_controlnet_model:** `typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedControlnetModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**prompt_strength:** `typing.Optional[float]` - -
-
- -
-
- -**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3letter_writer_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3letter_writer_async_form( - action_id="action_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**action_id:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**prompt_header:** `typing.Optional[str]` - -
-
- -
-
- -**example_letters:** `typing.Optional[typing.List[TrainingDataModel]]` - -
-
- -
-
- -**lm_selected_api:** `typing.Optional[str]` - -
-
- -
-
- -**lm_selected_engine:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**lm_sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**api_http_method:** `typing.Optional[str]` - -
-
- -
-
- -**api_url:** `typing.Optional[str]` - -
-
- -
-
- -**api_headers:** `typing.Optional[str]` - -
-
- -
-
- -**api_json_body:** `typing.Optional[str]` - -
-
- -
-
- -**input_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**strip_html2text:** `typing.Optional[bool]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3lipsync_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3lipsync_async_form() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_left:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_right:** `typing.Optional[int]` - -
-
- -
-
- -**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3LipsyncAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**input_audio:** `typing.Optional[str]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3lipsync_tts_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3lipsync_tts_async_form( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**tts_provider:** `typing.Optional[PostV3LipsyncTtsAsyncFormRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel]` - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_left:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_right:** `typing.Optional[int]` - -
-
- -
-
- -**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3LipsyncTtsAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3object_inpainting_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3object_inpainting_async_form( - input_image="input_image", - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**obj_scale:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**mask_threshold:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3ObjectInpaintingAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3seo_summary_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3seo_summary_async_form( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**keywords:** `str` - -
-
- -
-
- -**title:** `str` - -
-
- -
-
- -**company_url:** `str` - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**enable_html:** `typing.Optional[bool]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3SeoSummaryAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**max_search_urls:** `typing.Optional[int]` - -
-
- -
-
- -**enable_crosslinks:** `typing.Optional[bool]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3SeoSummaryAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
- -
-
- -**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
- -
-
- -**serp_search_type:** `typing.Optional[SerpSearchType]` - -
-
- -
-
- -**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3smart_gpt_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3smart_gpt_async_form( - input_prompt="input_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**cot_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**reflexion_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**dera_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3SmartGptAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3SmartGptAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3social_lookup_email_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3social_lookup_email_async_form( - email_address="email_address", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**email_address:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3SocialLookupEmailAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3SocialLookupEmailAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3text_to_speech_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3text_to_speech_async_form( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**tts_provider:** `typing.Optional[PostV3TextToSpeechAsyncFormRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3art_qr_code_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3art_qr_code_async_form( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**qr_code_data:** `typing.Optional[str]` - -
-
- -
-
- -**qr_code_input_image:** `typing.Optional[str]` - -
-
- -
-
- -**qr_code_vcard:** `typing.Optional[Vcard]` - -
-
- -
-
- -**qr_code_file:** `typing.Optional[str]` - -
-
- -
-
- -**use_url_shortener:** `typing.Optional[bool]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**image_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**image_prompt_controlnet_models:** `typing.Optional[ - typing.List[PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem] -]` - -
-
- -
-
- -**image_prompt_strength:** `typing.Optional[float]` - -
-
- -
-
- -**image_prompt_scale:** `typing.Optional[float]` - -
-
- -
-
- -**image_prompt_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**image_prompt_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3ArtQrCodeAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**selected_controlnet_model:** `typing.Optional[ - typing.List[PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem] -]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**scheduler:** `typing.Optional[PostV3ArtQrCodeAsyncFormRequestScheduler]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**obj_scale:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3asr_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3asr_async_form( - documents=["documents"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.List[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3AsrAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**language:** `typing.Optional[str]` - -
-
- -
-
- -**translation_model:** `typing.Optional[PostV3AsrAsyncFormRequestTranslationModel]` - -
-
- -
-
- -**output_format:** `typing.Optional[PostV3AsrAsyncFormRequestOutputFormat]` - -
-
- -
-
- -**google_translate_target:** `typing.Optional[str]` — use `translation_model` & `translation_target` instead. - -
-
- -
-
- -**translation_source:** `typing.Optional[str]` - -
-
- -
-
- -**translation_target:** `typing.Optional[str]` - -
-
- -
-
- -**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3bulk_eval_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3bulk_eval_async_form( - documents=["documents"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.List[str]` - - -Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. -Remember to includes header names in your CSV too. - - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**eval_prompts:** `typing.Optional[typing.List[EvalPrompt]]` - - -Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. -_The `columns` dictionary can be used to reference the spreadsheet columns._ - - -
-
- -
-
- -**agg_functions:** `typing.Optional[typing.List[AggFunction]]` - - -Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3BulkEvalAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3BulkEvalAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3bulk_runner_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3bulk_runner_async_form( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.List[str]` - - Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would be sample questions or, for Art QR Code, would be pairs of image descriptions and URLs. -Remember to include header names in your CSV too. - -
-
- -
-
- -**run_urls:** `typing.List[str]` - - -Provide one or more Gooey.AI workflow runs. -You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - - -
-
- -
-
- -**input_columns:** `typing.Dict[str, str]` - - -For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - - -
-
- -
-
- -**output_columns:** `typing.Dict[str, str]` - - -For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**eval_urls:** `typing.Optional[typing.List[str]]` - - -_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3compare_ai_upscalers_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3compare_ai_upscalers_async_form( - scale=1, -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**scale:** `int` — The final upsampling scale of the image - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_image:** `typing.Optional[str]` — Input Image - -
-
- -
-
- -**input_video:** `typing.Optional[str]` — Input Video - -
-
- -
-
- -**selected_models:** `typing.Optional[ - typing.List[PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem] -]` - -
-
- -
-
- -**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3doc_extract_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3doc_extract_async_form( - documents=["documents"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.List[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**sheet_url:** `typing.Optional[str]` - -
-
- -
-
- -**selected_asr_model:** `typing.Optional[PostV3DocExtractAsyncFormRequestSelectedAsrModel]` - -
-
- -
-
- -**google_translate_target:** `typing.Optional[str]` - -
-
- -
-
- -**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3DocExtractAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3DocExtractAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3doc_search_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3doc_search_async_form( - search_query="search_query", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**keyword_query:** `typing.Optional[PostV3DocSearchAsyncFormRequestKeywordQuery]` - -
-
- -
-
- -**documents:** `typing.Optional[typing.List[str]]` - -
-
- -
-
- -**max_references:** `typing.Optional[int]` - -
-
- -
-
- -**max_context_words:** `typing.Optional[int]` - -
-
- -
-
- -**scroll_jump:** `typing.Optional[int]` - -
-
- -
-
- -**doc_extract_url:** `typing.Optional[str]` - -
-
- -
-
- -**embedding_model:** `typing.Optional[PostV3DocSearchAsyncFormRequestEmbeddingModel]` - -
-
- -
-
- -**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**query_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3DocSearchAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**citation_style:** `typing.Optional[PostV3DocSearchAsyncFormRequestCitationStyle]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3DocSearchAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3doc_summary_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3doc_summary_async_form( - documents=["documents"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.List[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**merge_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]` - -
-
- -
-
- -**selected_asr_model:** `typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedAsrModel]` - -
-
- -
-
- -**google_translate_target:** `typing.Optional[str]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3DocSummaryAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3embeddings_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3embeddings_async_form( - texts=["texts"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**texts:** `typing.List[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3EmbeddingsAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3functions_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3functions_async_form() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**code:** `typing.Optional[str]` — The JS code to be executed. - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used in the code - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3google_gpt_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3google_gpt_async_form( - search_query="search_query", - site_filter="site_filter", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**site_filter:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**query_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3GoogleGptAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**max_search_urls:** `typing.Optional[int]` - -
-
- -
-
- -**max_references:** `typing.Optional[int]` - -
-
- -
-
- -**max_context_words:** `typing.Optional[int]` - -
-
- -
-
- -**scroll_jump:** `typing.Optional[int]` - -
-
- -
-
- -**embedding_model:** `typing.Optional[PostV3GoogleGptAsyncFormRequestEmbeddingModel]` - -
-
- -
-
- -**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3GoogleGptAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
- -
-
- -**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
- -
-
- -**serp_search_type:** `typing.Optional[SerpSearchType]` - -
-
- -
-
- -**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3related_qna_maker_doc_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3related_qna_maker_doc_async_form( - search_query="search_query", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**keyword_query:** `typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery]` - -
-
- -
-
- -**documents:** `typing.Optional[typing.List[str]]` - -
-
- -
-
- -**max_references:** `typing.Optional[int]` - -
-
- -
-
- -**max_context_words:** `typing.Optional[int]` - -
-
- -
-
- -**scroll_jump:** `typing.Optional[int]` - -
-
- -
-
- -**doc_extract_url:** `typing.Optional[str]` - -
-
- -
-
- -**embedding_model:** `typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel]` - -
-
- -
-
- -**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**query_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**citation_style:** `typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
- -
-
- -**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
- -
-
- -**serp_search_type:** `typing.Optional[SerpSearchType]` - -
-
- -
-
- -**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3related_qna_maker_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3related_qna_maker_async_form( - search_query="search_query", - site_filter="site_filter", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**site_filter:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**query_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**max_search_urls:** `typing.Optional[int]` - -
-
- -
-
- -**max_references:** `typing.Optional[int]` - -
-
- -
-
- -**max_context_words:** `typing.Optional[int]` - -
-
- -
-
- -**scroll_jump:** `typing.Optional[int]` - -
-
- -
-
- -**embedding_model:** `typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel]` - -
-
- -
-
- -**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
- -
-
- -**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
- -
-
- -**serp_search_type:** `typing.Optional[SerpSearchType]` - -
-
- -
-
- -**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3text2audio_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3text2audio_async_form( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**duration_sec:** `typing.Optional[float]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.List[typing.Literal["audio_ldm"]]]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3translate_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3translate_async_form() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**texts:** `typing.Optional[typing.List[str]]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3TranslateAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**translation_source:** `typing.Optional[str]` - -
-
- -
-
- -**translation_target:** `typing.Optional[str]` - -
-
- -
-
- -**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.post_v3video_bots_async_form(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3video_bots_async_form() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**input_audio:** `typing.Optional[str]` - -
-
- -
-
- -**input_images:** `typing.Optional[typing.List[str]]` - -
-
- -
-
- -**input_documents:** `typing.Optional[typing.List[str]]` - -
-
- -
-
- -**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. - -
-
- -
-
- -**messages:** `typing.Optional[typing.List[ConversationEntry]]` - -
-
- -
-
- -**bot_script:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestSelectedModel]` - -
-
- -
-
- -**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**query_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**keyword_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**documents:** `typing.Optional[typing.List[str]]` - -
-
- -
-
- -**max_references:** `typing.Optional[int]` - -
-
- -
-
- -**max_context_words:** `typing.Optional[int]` - -
-
- -
-
- -**scroll_jump:** `typing.Optional[int]` - -
-
- -
-
- -**embedding_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestEmbeddingModel]` - -
-
- -
-
- -**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - -
-
- -
-
- -**citation_style:** `typing.Optional[PostV3VideoBotsAsyncFormRequestCitationStyle]` - -
-
- -
-
- -**use_url_shortener:** `typing.Optional[bool]` - -
-
- -
-
- -**asr_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. - -
-
- -
-
- -**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. - -
-
- -
-
- -**translation_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestTranslationModel]` - -
-
- -
-
- -**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - -
-
- -
-
- -**input_glossary_document:** `typing.Optional[str]` - - Translation Glossary for User Language -> LLM Language (English) - -
-
- -
-
- -**output_glossary_document:** `typing.Optional[str]` - - Translation Glossary for LLM Language (English) -> User Language - -
-
- -
-
- -**lipsync_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestLipsyncModel]` - -
-
- -
-
- -**tools:** `typing.Optional[typing.List[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[PostV3VideoBotsAsyncFormRequestResponseFormatType]` - -
-
- -
-
- -**tts_provider:** `typing.Optional[PostV3VideoBotsAsyncFormRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiTtsModel]` - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_left:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_right:** `typing.Optional[int]` - -
-
- -
-
- -**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- -
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py index 7eb63d2..fd8646c 100644 --- a/src/gooey/__init__.py +++ b/src/gooey/__init__.py @@ -120,77 +120,6 @@ ObjectInpaintingPageOutput, ObjectInpaintingPageRequestSelectedModel, ObjectInpaintingPageStatusResponse, - PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem, - PostV3ArtQrCodeAsyncFormRequestScheduler, - PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem, - PostV3ArtQrCodeAsyncFormRequestSelectedModel, - PostV3AsrAsyncFormRequestOutputFormat, - PostV3AsrAsyncFormRequestSelectedModel, - PostV3AsrAsyncFormRequestTranslationModel, - PostV3BulkEvalAsyncFormRequestResponseFormatType, - PostV3BulkEvalAsyncFormRequestSelectedModel, - PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem, - PostV3CompareLlmAsyncFormRequestResponseFormatType, - PostV3CompareLlmAsyncFormRequestSelectedModelsItem, - PostV3CompareText2ImgAsyncFormRequestScheduler, - PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem, - PostV3DeforumSdAsyncFormRequestSelectedModel, - PostV3DocExtractAsyncFormRequestResponseFormatType, - PostV3DocExtractAsyncFormRequestSelectedAsrModel, - PostV3DocExtractAsyncFormRequestSelectedModel, - PostV3DocSearchAsyncFormRequestCitationStyle, - PostV3DocSearchAsyncFormRequestEmbeddingModel, - PostV3DocSearchAsyncFormRequestKeywordQuery, - PostV3DocSearchAsyncFormRequestResponseFormatType, - PostV3DocSearchAsyncFormRequestSelectedModel, - PostV3DocSummaryAsyncFormRequestResponseFormatType, - PostV3DocSummaryAsyncFormRequestSelectedAsrModel, - PostV3DocSummaryAsyncFormRequestSelectedModel, - PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel, - PostV3EmbeddingsAsyncFormRequestSelectedModel, - PostV3FaceInpaintingAsyncFormRequestSelectedModel, - PostV3GoogleGptAsyncFormRequestEmbeddingModel, - PostV3GoogleGptAsyncFormRequestResponseFormatType, - PostV3GoogleGptAsyncFormRequestSelectedModel, - PostV3GoogleImageGenAsyncFormRequestSelectedModel, - 
PostV3ImageSegmentationAsyncFormRequestSelectedModel, - PostV3Img2ImgAsyncFormRequestSelectedControlnetModel, - PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem, - PostV3Img2ImgAsyncFormRequestSelectedModel, - PostV3LipsyncAsyncFormRequestSelectedModel, - PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel, - PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName, - PostV3LipsyncTtsAsyncFormRequestSelectedModel, - PostV3LipsyncTtsAsyncFormRequestTtsProvider, - PostV3ObjectInpaintingAsyncFormRequestSelectedModel, - PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel, - PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType, - PostV3RelatedQnaMakerAsyncFormRequestSelectedModel, - PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle, - PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel, - PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery, - PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType, - PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel, - PostV3SeoSummaryAsyncFormRequestResponseFormatType, - PostV3SeoSummaryAsyncFormRequestSelectedModel, - PostV3SmartGptAsyncFormRequestResponseFormatType, - PostV3SmartGptAsyncFormRequestSelectedModel, - PostV3SocialLookupEmailAsyncFormRequestResponseFormatType, - PostV3SocialLookupEmailAsyncFormRequestSelectedModel, - PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel, - PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName, - PostV3TextToSpeechAsyncFormRequestTtsProvider, - PostV3TranslateAsyncFormRequestSelectedModel, - PostV3VideoBotsAsyncFormRequestAsrModel, - PostV3VideoBotsAsyncFormRequestCitationStyle, - PostV3VideoBotsAsyncFormRequestEmbeddingModel, - PostV3VideoBotsAsyncFormRequestLipsyncModel, - PostV3VideoBotsAsyncFormRequestOpenaiTtsModel, - PostV3VideoBotsAsyncFormRequestOpenaiVoiceName, - PostV3VideoBotsAsyncFormRequestResponseFormatType, - PostV3VideoBotsAsyncFormRequestSelectedModel, - PostV3VideoBotsAsyncFormRequestTranslationModel, - PostV3VideoBotsAsyncFormRequestTtsProvider, PromptTreeNode, 
PromptTreeNodePrompt, QrCodeGeneratorPageOutput, @@ -429,77 +358,6 @@ "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", "PaymentRequiredError", - "PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem", - "PostV3ArtQrCodeAsyncFormRequestScheduler", - "PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem", - "PostV3ArtQrCodeAsyncFormRequestSelectedModel", - "PostV3AsrAsyncFormRequestOutputFormat", - "PostV3AsrAsyncFormRequestSelectedModel", - "PostV3AsrAsyncFormRequestTranslationModel", - "PostV3BulkEvalAsyncFormRequestResponseFormatType", - "PostV3BulkEvalAsyncFormRequestSelectedModel", - "PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem", - "PostV3CompareLlmAsyncFormRequestResponseFormatType", - "PostV3CompareLlmAsyncFormRequestSelectedModelsItem", - "PostV3CompareText2ImgAsyncFormRequestScheduler", - "PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem", - "PostV3DeforumSdAsyncFormRequestSelectedModel", - "PostV3DocExtractAsyncFormRequestResponseFormatType", - "PostV3DocExtractAsyncFormRequestSelectedAsrModel", - "PostV3DocExtractAsyncFormRequestSelectedModel", - "PostV3DocSearchAsyncFormRequestCitationStyle", - "PostV3DocSearchAsyncFormRequestEmbeddingModel", - "PostV3DocSearchAsyncFormRequestKeywordQuery", - "PostV3DocSearchAsyncFormRequestResponseFormatType", - "PostV3DocSearchAsyncFormRequestSelectedModel", - "PostV3DocSummaryAsyncFormRequestResponseFormatType", - "PostV3DocSummaryAsyncFormRequestSelectedAsrModel", - "PostV3DocSummaryAsyncFormRequestSelectedModel", - "PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel", - "PostV3EmbeddingsAsyncFormRequestSelectedModel", - "PostV3FaceInpaintingAsyncFormRequestSelectedModel", - "PostV3GoogleGptAsyncFormRequestEmbeddingModel", - "PostV3GoogleGptAsyncFormRequestResponseFormatType", - "PostV3GoogleGptAsyncFormRequestSelectedModel", - "PostV3GoogleImageGenAsyncFormRequestSelectedModel", - "PostV3ImageSegmentationAsyncFormRequestSelectedModel", - 
"PostV3Img2ImgAsyncFormRequestSelectedControlnetModel", - "PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem", - "PostV3Img2ImgAsyncFormRequestSelectedModel", - "PostV3LipsyncAsyncFormRequestSelectedModel", - "PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel", - "PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName", - "PostV3LipsyncTtsAsyncFormRequestSelectedModel", - "PostV3LipsyncTtsAsyncFormRequestTtsProvider", - "PostV3ObjectInpaintingAsyncFormRequestSelectedModel", - "PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel", - "PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType", - "PostV3RelatedQnaMakerAsyncFormRequestSelectedModel", - "PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle", - "PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel", - "PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery", - "PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType", - "PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel", - "PostV3SeoSummaryAsyncFormRequestResponseFormatType", - "PostV3SeoSummaryAsyncFormRequestSelectedModel", - "PostV3SmartGptAsyncFormRequestResponseFormatType", - "PostV3SmartGptAsyncFormRequestSelectedModel", - "PostV3SocialLookupEmailAsyncFormRequestResponseFormatType", - "PostV3SocialLookupEmailAsyncFormRequestSelectedModel", - "PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel", - "PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName", - "PostV3TextToSpeechAsyncFormRequestTtsProvider", - "PostV3TranslateAsyncFormRequestSelectedModel", - "PostV3VideoBotsAsyncFormRequestAsrModel", - "PostV3VideoBotsAsyncFormRequestCitationStyle", - "PostV3VideoBotsAsyncFormRequestEmbeddingModel", - "PostV3VideoBotsAsyncFormRequestLipsyncModel", - "PostV3VideoBotsAsyncFormRequestOpenaiTtsModel", - "PostV3VideoBotsAsyncFormRequestOpenaiVoiceName", - "PostV3VideoBotsAsyncFormRequestResponseFormatType", - "PostV3VideoBotsAsyncFormRequestSelectedModel", - "PostV3VideoBotsAsyncFormRequestTranslationModel", - "PostV3VideoBotsAsyncFormRequestTtsProvider", 
"PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", diff --git a/src/gooey/client.py b/src/gooey/client.py index 3758806..acf6501 100644 --- a/src/gooey/client.py +++ b/src/gooey/client.py @@ -139,160 +139,6 @@ from .types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType from .types.related_qn_a_doc_page_output import RelatedQnADocPageOutput from .types.related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse -from .types.chyron_plant_page_status_response import ChyronPlantPageStatusResponse -from .types.post_v3compare_llm_async_form_request_selected_models_item import ( - PostV3CompareLlmAsyncFormRequestSelectedModelsItem, -) -from .types.post_v3compare_llm_async_form_request_response_format_type import ( - PostV3CompareLlmAsyncFormRequestResponseFormatType, -) -from .types.post_v3compare_text2img_async_form_request_selected_models_item import ( - PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem, -) -from .types.post_v3compare_text2img_async_form_request_scheduler import PostV3CompareText2ImgAsyncFormRequestScheduler -from .types.post_v3deforum_sd_async_form_request_selected_model import PostV3DeforumSdAsyncFormRequestSelectedModel -from .types.post_v3email_face_inpainting_async_form_request_selected_model import ( - PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel, -) -from .types.post_v3face_inpainting_async_form_request_selected_model import ( - PostV3FaceInpaintingAsyncFormRequestSelectedModel, -) -from .types.post_v3google_image_gen_async_form_request_selected_model import ( - PostV3GoogleImageGenAsyncFormRequestSelectedModel, -) -from .types.post_v3image_segmentation_async_form_request_selected_model import ( - PostV3ImageSegmentationAsyncFormRequestSelectedModel, -) -from .types.post_v3img2img_async_form_request_selected_model import PostV3Img2ImgAsyncFormRequestSelectedModel -from .types.post_v3img2img_async_form_request_selected_controlnet_model import 
( - PostV3Img2ImgAsyncFormRequestSelectedControlnetModel, -) -from .types.training_data_model import TrainingDataModel -from .types.letter_writer_page_status_response import LetterWriterPageStatusResponse -from .types.post_v3lipsync_async_form_request_selected_model import PostV3LipsyncAsyncFormRequestSelectedModel -from .types.lipsync_page_status_response import LipsyncPageStatusResponse -from .types.post_v3lipsync_tts_async_form_request_tts_provider import PostV3LipsyncTtsAsyncFormRequestTtsProvider -from .types.post_v3lipsync_tts_async_form_request_openai_voice_name import ( - PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName, -) -from .types.post_v3lipsync_tts_async_form_request_openai_tts_model import PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel -from .types.post_v3lipsync_tts_async_form_request_selected_model import PostV3LipsyncTtsAsyncFormRequestSelectedModel -from .types.post_v3object_inpainting_async_form_request_selected_model import ( - PostV3ObjectInpaintingAsyncFormRequestSelectedModel, -) -from .types.post_v3seo_summary_async_form_request_selected_model import PostV3SeoSummaryAsyncFormRequestSelectedModel -from .types.post_v3seo_summary_async_form_request_response_format_type import ( - PostV3SeoSummaryAsyncFormRequestResponseFormatType, -) -from .types.post_v3smart_gpt_async_form_request_selected_model import PostV3SmartGptAsyncFormRequestSelectedModel -from .types.post_v3smart_gpt_async_form_request_response_format_type import ( - PostV3SmartGptAsyncFormRequestResponseFormatType, -) -from .types.smart_gpt_page_status_response import SmartGptPageStatusResponse -from .types.post_v3social_lookup_email_async_form_request_selected_model import ( - PostV3SocialLookupEmailAsyncFormRequestSelectedModel, -) -from .types.post_v3social_lookup_email_async_form_request_response_format_type import ( - PostV3SocialLookupEmailAsyncFormRequestResponseFormatType, -) -from .types.post_v3text_to_speech_async_form_request_tts_provider import 
PostV3TextToSpeechAsyncFormRequestTtsProvider -from .types.post_v3text_to_speech_async_form_request_openai_voice_name import ( - PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName, -) -from .types.post_v3text_to_speech_async_form_request_openai_tts_model import ( - PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel, -) -from .types.post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item import ( - PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem, -) -from .types.post_v3art_qr_code_async_form_request_selected_model import PostV3ArtQrCodeAsyncFormRequestSelectedModel -from .types.post_v3art_qr_code_async_form_request_selected_controlnet_model_item import ( - PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem, -) -from .types.post_v3art_qr_code_async_form_request_scheduler import PostV3ArtQrCodeAsyncFormRequestScheduler -from .types.post_v3asr_async_form_request_selected_model import PostV3AsrAsyncFormRequestSelectedModel -from .types.post_v3asr_async_form_request_translation_model import PostV3AsrAsyncFormRequestTranslationModel -from .types.post_v3asr_async_form_request_output_format import PostV3AsrAsyncFormRequestOutputFormat -from .types.eval_prompt import EvalPrompt -from .types.agg_function import AggFunction -from .types.post_v3bulk_eval_async_form_request_selected_model import PostV3BulkEvalAsyncFormRequestSelectedModel -from .types.post_v3bulk_eval_async_form_request_response_format_type import ( - PostV3BulkEvalAsyncFormRequestResponseFormatType, -) -from .types.bulk_eval_page_status_response import BulkEvalPageStatusResponse -from .types.post_v3compare_ai_upscalers_async_form_request_selected_models_item import ( - PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem, -) -from .types.post_v3doc_extract_async_form_request_selected_asr_model import ( - PostV3DocExtractAsyncFormRequestSelectedAsrModel, -) -from .types.post_v3doc_extract_async_form_request_selected_model import 
PostV3DocExtractAsyncFormRequestSelectedModel -from .types.post_v3doc_extract_async_form_request_response_format_type import ( - PostV3DocExtractAsyncFormRequestResponseFormatType, -) -from .types.post_v3doc_search_async_form_request_keyword_query import PostV3DocSearchAsyncFormRequestKeywordQuery -from .types.post_v3doc_search_async_form_request_embedding_model import PostV3DocSearchAsyncFormRequestEmbeddingModel -from .types.post_v3doc_search_async_form_request_selected_model import PostV3DocSearchAsyncFormRequestSelectedModel -from .types.post_v3doc_search_async_form_request_citation_style import PostV3DocSearchAsyncFormRequestCitationStyle -from .types.post_v3doc_search_async_form_request_response_format_type import ( - PostV3DocSearchAsyncFormRequestResponseFormatType, -) -from .types.post_v3doc_summary_async_form_request_selected_model import PostV3DocSummaryAsyncFormRequestSelectedModel -from .types.post_v3doc_summary_async_form_request_selected_asr_model import ( - PostV3DocSummaryAsyncFormRequestSelectedAsrModel, -) -from .types.post_v3doc_summary_async_form_request_response_format_type import ( - PostV3DocSummaryAsyncFormRequestResponseFormatType, -) -from .types.post_v3embeddings_async_form_request_selected_model import PostV3EmbeddingsAsyncFormRequestSelectedModel -from .types.functions_page_status_response import FunctionsPageStatusResponse -from .types.post_v3google_gpt_async_form_request_selected_model import PostV3GoogleGptAsyncFormRequestSelectedModel -from .types.post_v3google_gpt_async_form_request_embedding_model import PostV3GoogleGptAsyncFormRequestEmbeddingModel -from .types.post_v3google_gpt_async_form_request_response_format_type import ( - PostV3GoogleGptAsyncFormRequestResponseFormatType, -) -from .types.post_v3related_qna_maker_doc_async_form_request_keyword_query import ( - PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery, -) -from .types.post_v3related_qna_maker_doc_async_form_request_embedding_model import ( - 
PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel, -) -from .types.post_v3related_qna_maker_doc_async_form_request_selected_model import ( - PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel, -) -from .types.post_v3related_qna_maker_doc_async_form_request_citation_style import ( - PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle, -) -from .types.post_v3related_qna_maker_doc_async_form_request_response_format_type import ( - PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType, -) -from .types.post_v3related_qna_maker_async_form_request_selected_model import ( - PostV3RelatedQnaMakerAsyncFormRequestSelectedModel, -) -from .types.post_v3related_qna_maker_async_form_request_embedding_model import ( - PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel, -) -from .types.post_v3related_qna_maker_async_form_request_response_format_type import ( - PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType, -) -from .types.post_v3translate_async_form_request_selected_model import PostV3TranslateAsyncFormRequestSelectedModel -from .types.conversation_entry import ConversationEntry -from .types.post_v3video_bots_async_form_request_selected_model import PostV3VideoBotsAsyncFormRequestSelectedModel -from .types.post_v3video_bots_async_form_request_embedding_model import PostV3VideoBotsAsyncFormRequestEmbeddingModel -from .types.post_v3video_bots_async_form_request_citation_style import PostV3VideoBotsAsyncFormRequestCitationStyle -from .types.post_v3video_bots_async_form_request_asr_model import PostV3VideoBotsAsyncFormRequestAsrModel -from .types.post_v3video_bots_async_form_request_translation_model import ( - PostV3VideoBotsAsyncFormRequestTranslationModel, -) -from .types.post_v3video_bots_async_form_request_lipsync_model import PostV3VideoBotsAsyncFormRequestLipsyncModel -from .types.llm_tools import LlmTools -from .types.post_v3video_bots_async_form_request_response_format_type import ( - PostV3VideoBotsAsyncFormRequestResponseFormatType, -) -from 
.types.post_v3video_bots_async_form_request_tts_provider import PostV3VideoBotsAsyncFormRequestTtsProvider -from .types.post_v3video_bots_async_form_request_openai_voice_name import PostV3VideoBotsAsyncFormRequestOpenaiVoiceName -from .types.post_v3video_bots_async_form_request_openai_tts_model import PostV3VideoBotsAsyncFormRequestOpenaiTtsModel -from .types.video_bots_page_status_response import VideoBotsPageStatusResponse from .core.client_wrapper import AsyncClientWrapper from .copilot_integrations.client import AsyncCopilotIntegrationsClient from .copilot_for_your_enterprise.client import AsyncCopilotForYourEnterpriseClient @@ -4521,120 +4367,129 @@ def health_status_get( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3chyron_plant_async_form( - self, - *, - midi_notes: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - midi_notes_prompt: typing.Optional[str] = None, - chyron_prompt: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ChyronPlantPageStatusResponse: - """ - Parameters - ---------- - midi_notes : str - functions : typing.Optional[typing.List[RecipeFunction]] +class AsyncGooey: + """ + Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments + Parameters + ---------- + base_url : typing.Optional[str] + The base url to use for requests from the client. 
- midi_notes_prompt : typing.Optional[str] + environment : GooeyEnvironment + The environment to use for requests from the client. from .environment import GooeyEnvironment - chyron_prompt : typing.Optional[str] - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Defaults to GooeyEnvironment.DEFAULT - Returns - ------- - ChyronPlantPageStatusResponse - Successful Response - Examples - -------- - from gooey import Gooey - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3chyron_plant_async_form( - midi_notes="midi_notes", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "midi_notes": midi_notes, - "midi_notes_prompt": midi_notes_prompt, - "chyron_prompt": chyron_prompt, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, + api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]] + timeout : typing.Optional[float] + The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. + + follow_redirects : typing.Optional[bool] + Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. + + httpx_client : typing.Optional[httpx.AsyncClient] + The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. 
+ + Examples + -------- + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + """ + + def __init__( + self, + *, + base_url: typing.Optional[str] = None, + environment: GooeyEnvironment = GooeyEnvironment.DEFAULT, + api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"), + timeout: typing.Optional[float] = None, + follow_redirects: typing.Optional[bool] = True, + httpx_client: typing.Optional[httpx.AsyncClient] = None, + ): + _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None + if api_key is None: + raise ApiError(body="The client must be instantiated be either passing in api_key or setting GOOEY_API_KEY") + self._client_wrapper = AsyncClientWrapper( + base_url=_get_base_url(base_url=base_url, environment=environment), + api_key=api_key, + httpx_client=httpx_client + if httpx_client is not None + else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) + if follow_redirects is not None + else httpx.AsyncClient(timeout=_defaulted_timeout), + timeout=_defaulted_timeout, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ChyronPlantPageStatusResponse, - parse_obj_as( - type_=ChyronPlantPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper) + self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) + self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper) + self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper) + self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper) + 
self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper) + self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper) - def post_v3compare_llm_async_form( + async def animate( self, *, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_prompt: typing.Optional[str] = None, - selected_models: typing.Optional[typing.List[PostV3CompareLlmAsyncFormRequestSelectedModelsItem]] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3CompareLlmAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, + animation_prompts: typing.Sequence[AnimationPrompt], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_frames: typing.Optional[int] = OMIT, + selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, + animation_mode: typing.Optional[str] = OMIT, + zoom: typing.Optional[str] = OMIT, + translation_x: typing.Optional[str] = OMIT, + translation_y: typing.Optional[str] = OMIT, + rotation3d_x: typing.Optional[str] = OMIT, + rotation3d_y: typing.Optional[str] = OMIT, + rotation3d_z: typing.Optional[str] = OMIT, + fps: typing.Optional[int] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> CompareLlmPageStatusResponse: + ) -> typing.Optional[DeforumSdPageOutput]: """ Parameters ---------- - functions : typing.Optional[typing.List[RecipeFunction]] + animation_prompts : typing.Sequence[AnimationPrompt] + + example_id : 
typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - input_prompt : typing.Optional[str] + max_frames : typing.Optional[int] - selected_models : typing.Optional[typing.List[PostV3CompareLlmAsyncFormRequestSelectedModelsItem]] + selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] - avoid_repetition : typing.Optional[bool] + animation_mode : typing.Optional[str] - num_outputs : typing.Optional[int] + zoom : typing.Optional[str] - quality : typing.Optional[float] + translation_x : typing.Optional[str] - max_tokens : typing.Optional[int] + translation_y : typing.Optional[str] - sampling_temperature : typing.Optional[float] + rotation3d_x : typing.Optional[str] + + rotation3d_y : typing.Optional[str] + + rotation3d_z : typing.Optional[str] + + fps : typing.Optional[int] - response_format_type : typing.Optional[PostV3CompareLlmAsyncFormRequestResponseFormatType] + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -4643,9026 +4498,204 @@ def post_v3compare_llm_async_form( Returns ------- - CompareLlmPageStatusResponse + typing.Optional[DeforumSdPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AnimationPrompt, AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3compare_llm_async_form() + + + async def main() -> None: + await client.animate( + animation_prompts=[ + AnimationPrompt( + frame="frame", + prompt="prompt", + ) + ], + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async/form", + _response = await self._client_wrapper.httpx_client.request( + "v3/DeforumSD/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": 
variables, - "input_prompt": input_prompt, - "selected_models": selected_models, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, + "animation_prompts": animation_prompts, + "max_frames": max_frames, + "selected_model": selected_model, + "animation_mode": animation_mode, + "zoom": zoom, + "translation_x": translation_x, + "translation_y": translation_y, + "rotation_3d_x": rotation3d_x, + "rotation_3d_y": rotation3d_y, + "rotation_3d_z": rotation3d_z, + "fps": fps, + "seed": seed, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - CompareLlmPageStatusResponse, + _parsed_response = typing.cast( + DeforumSdPageStatusResponse, parse_obj_as( - type_=CompareLlmPageStatusResponse, # type: ignore + type_=DeforumSdPageStatusResponse, # type: ignore object_=_response.json(), ), ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3compare_text2img_async_form( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - negative_prompt: typing.Optional[str] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - dall_e3quality: typing.Optional[str] = None, - dall_e3style: typing.Optional[str] = None, - guidance_scale: typing.Optional[float] = None, - seed: typing.Optional[int] = None, - sd2upscaling: typing.Optional[bool] = None, - selected_models: 
typing.Optional[typing.List[PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem]] = None, - scheduler: typing.Optional[PostV3CompareText2ImgAsyncFormRequestScheduler] = None, - edit_instruction: typing.Optional[str] = None, - image_guidance_scale: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> CompareText2ImgPageStatusResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - dall_e3quality : typing.Optional[str] - - dall_e3style : typing.Optional[str] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.List[PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem]] - - scheduler : typing.Optional[PostV3CompareText2ImgAsyncFormRequestScheduler] - - edit_instruction : typing.Optional[str] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareText2ImgPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3compare_text2img_async_form( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, - "num_outputs": num_outputs, - "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CompareText2ImgPageStatusResponse, - parse_obj_as( - type_=CompareText2ImgPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3deforum_sd_async_form( - self, - *, - animation_prompts: typing.List[AnimationPrompt], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - max_frames: typing.Optional[int] = None, - selected_model: typing.Optional[PostV3DeforumSdAsyncFormRequestSelectedModel] = None, - animation_mode: typing.Optional[str] = None, - zoom: typing.Optional[str] = None, - translation_x: typing.Optional[str] = None, - translation_y: typing.Optional[str] 
= None, - rotation3d_x: typing.Optional[str] = None, - rotation3d_y: typing.Optional[str] = None, - rotation3d_z: typing.Optional[str] = None, - fps: typing.Optional[int] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> DeforumSdPageStatusResponse: - """ - Parameters - ---------- - animation_prompts : typing.List[AnimationPrompt] - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - max_frames : typing.Optional[int] - - selected_model : typing.Optional[PostV3DeforumSdAsyncFormRequestSelectedModel] - - animation_mode : typing.Optional[str] - - zoom : typing.Optional[str] - - translation_x : typing.Optional[str] - - translation_y : typing.Optional[str] - - rotation3d_x : typing.Optional[str] - - rotation3d_y : typing.Optional[str] - - rotation3d_z : typing.Optional[str] - - fps : typing.Optional[int] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DeforumSdPageStatusResponse - Successful Response - - Examples - -------- - from gooey import AnimationPrompt, Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3deforum_sd_async_form( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, - "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, - "seed": seed, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DeforumSdPageStatusResponse, - parse_obj_as( - type_=DeforumSdPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3email_face_inpainting_async_form( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - email_address: typing.Optional[str] = None, - twitter_handle: typing.Optional[str] = None, - face_scale: typing.Optional[float] = None, - face_pos_x: typing.Optional[float] = None, - face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = 
None, - quality: typing.Optional[int] = None, - upscale_factor: typing.Optional[float] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - should_send_email: typing.Optional[bool] = None, - email_from: typing.Optional[str] = None, - email_cc: typing.Optional[str] = None, - email_bcc: typing.Optional[str] = None, - email_subject: typing.Optional[str] = None, - email_body: typing.Optional[str] = None, - email_body_enable_html: typing.Optional[bool] = None, - fallback_email_body: typing.Optional[str] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> EmailFaceInpaintingPageStatusResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - email_address : typing.Optional[str] - - twitter_handle : typing.Optional[str] - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - should_send_email : typing.Optional[bool] - - email_from : typing.Optional[str] - - email_cc : typing.Optional[str] - - email_bcc : typing.Optional[str] - - email_subject : typing.Optional[str] - - email_body : typing.Optional[str] - - email_body_enable_html : typing.Optional[bool] - - fallback_email_body : typing.Optional[str] - - seed : 
typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EmailFaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3email_face_inpainting_async_form( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, - "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, - "seed": seed, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EmailFaceInpaintingPageStatusResponse, - parse_obj_as( - type_=EmailFaceInpaintingPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3face_inpainting_async_form( - self, - *, - input_image: str, - text_prompt: str, - functions: 
typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - face_scale: typing.Optional[float] = None, - face_pos_x: typing.Optional[float] = None, - face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[PostV3FaceInpaintingAsyncFormRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - upscale_factor: typing.Optional[float] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> FaceInpaintingPageStatusResponse: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[PostV3FaceInpaintingAsyncFormRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - FaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3face_inpainting_async_form( - input_image="input_image", - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "seed": seed, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FaceInpaintingPageStatusResponse, - parse_obj_as( - type_=FaceInpaintingPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3google_image_gen_async_form( - self, - *, - search_query: str, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[PostV3GoogleImageGenAsyncFormRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] 
= None, - guidance_scale: typing.Optional[float] = None, - prompt_strength: typing.Optional[float] = None, - sd2upscaling: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - image_guidance_scale: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> GoogleImageGenPageStatusResponse: - """ - Parameters - ---------- - search_query : str - - text_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead - - selected_model : typing.Optional[PostV3GoogleImageGenAsyncFormRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - GoogleImageGenPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3google_image_gen_async_form( - search_query="search_query", - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, - "text_prompt": text_prompt, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - GoogleImageGenPageStatusResponse, - parse_obj_as( - type_=GoogleImageGenPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3image_segmentation_async_form( - self, - *, - input_image: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[PostV3ImageSegmentationAsyncFormRequestSelectedModel] = None, - mask_threshold: typing.Optional[float] = None, - rect_persepective_transform: typing.Optional[bool] = None, - reflection_opacity: typing.Optional[float] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: 
typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ImageSegmentationPageStatusResponse: - """ - Parameters - ---------- - input_image : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[PostV3ImageSegmentationAsyncFormRequestSelectedModel] - - mask_threshold : typing.Optional[float] - - rect_persepective_transform : typing.Optional[bool] - - reflection_opacity : typing.Optional[float] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ImageSegmentationPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3image_segmentation_async_form( - input_image="input_image", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ImageSegmentationPageStatusResponse, - parse_obj_as( - type_=ImageSegmentationPageStatusResponse, # type: ignore - 
object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3img2img_async_form( - self, - *, - input_image: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - text_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedControlnetModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - prompt_strength: typing.Optional[float] = None, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, - seed: typing.Optional[int] = None, - image_guidance_scale: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> Img2ImgPageStatusResponse: - """ - Parameters - ---------- - input_image : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - text_prompt : typing.Optional[str] - - selected_model : typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedModel] - - selected_controlnet_model : typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedControlnetModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height 
: typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.List[float]] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - Img2ImgPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3img2img_async_form( - input_image="input_image", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/Img2Img/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - Img2ImgPageStatusResponse, - parse_obj_as( - type_=Img2ImgPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3letter_writer_async_form( - self, - *, - action_id: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - prompt_header: typing.Optional[str] = None, - example_letters: typing.Optional[typing.List[TrainingDataModel]] = None, - lm_selected_api: typing.Optional[str] = None, - lm_selected_engine: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - lm_sampling_temperature: typing.Optional[float] = None, - api_http_method: typing.Optional[str] = None, - api_url: typing.Optional[str] = None, - api_headers: typing.Optional[str] = None, - api_json_body: typing.Optional[str] = None, - input_prompt: typing.Optional[str] = None, - strip_html2text: typing.Optional[bool] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> LetterWriterPageStatusResponse: - """ - Parameters - ---------- - action_id : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - prompt_header : typing.Optional[str] - - example_letters : typing.Optional[typing.List[TrainingDataModel]] - - lm_selected_api : typing.Optional[str] - - lm_selected_engine : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - lm_sampling_temperature : typing.Optional[float] - - api_http_method : typing.Optional[str] - - api_url : typing.Optional[str] - - api_headers : typing.Optional[str] - - api_json_body : typing.Optional[str] - - input_prompt : typing.Optional[str] - - strip_html2text : typing.Optional[bool] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LetterWriterPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3letter_writer_async_form( - action_id="action_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/LetterWriter/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "action_id": action_id, - "prompt_header": prompt_header, - "example_letters": example_letters, - "lm_selected_api": lm_selected_api, - "lm_selected_engine": lm_selected_engine, - "num_outputs": num_outputs, - "quality": quality, - "lm_sampling_temperature": lm_sampling_temperature, - "api_http_method": api_http_method, - "api_url": api_url, - "api_headers": api_headers, - "api_json_body": api_json_body, - "input_prompt": input_prompt, - "strip_html_2_text": strip_html2text, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LetterWriterPageStatusResponse, - parse_obj_as( - type_=LetterWriterPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3lipsync_async_form( - self, - *, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: 
typing.Optional[PostV3LipsyncAsyncFormRequestSelectedModel] = None, - input_audio: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> LipsyncPageStatusResponse: - """ - Parameters - ---------- - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[PostV3LipsyncAsyncFormRequestSelectedModel] - - input_audio : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3lipsync_async_form() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/Lipsync/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "input_audio": input_audio, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LipsyncPageStatusResponse, - parse_obj_as( - type_=LipsyncPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3lipsync_tts_async_form( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - tts_provider: typing.Optional[PostV3LipsyncTtsAsyncFormRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: 
typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[PostV3LipsyncTtsAsyncFormRequestSelectedModel] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> LipsyncTtsPageStatusResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[PostV3LipsyncTtsAsyncFormRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - 
- elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[PostV3LipsyncTtsAsyncFormRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LipsyncTtsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3lipsync_tts_async_form( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - 
"openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LipsyncTtsPageStatusResponse, - parse_obj_as( - type_=LipsyncTtsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3object_inpainting_async_form( - self, - *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - mask_threshold: typing.Optional[float] = None, - selected_model: typing.Optional[PostV3ObjectInpaintingAsyncFormRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - sd2upscaling: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ObjectInpaintingPageStatusResponse: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - functions : 
typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - mask_threshold : typing.Optional[float] - - selected_model : typing.Optional[PostV3ObjectInpaintingAsyncFormRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ObjectInpaintingPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3object_inpainting_async_form( - input_image="input_image", - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - 
ObjectInpaintingPageStatusResponse, - parse_obj_as( - type_=ObjectInpaintingPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3seo_summary_async_form( - self, - *, - search_query: str, - keywords: str, - title: str, - company_url: str, - task_instructions: typing.Optional[str] = None, - enable_html: typing.Optional[bool] = None, - selected_model: typing.Optional[PostV3SeoSummaryAsyncFormRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - enable_crosslinks: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3SeoSummaryAsyncFormRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SeoSummaryPageStatusResponse: - """ - Parameters - ---------- - search_query : str - - keywords : str - - title : str - - company_url : str - - task_instructions : typing.Optional[str] - - enable_html : typing.Optional[bool] - - selected_model : typing.Optional[PostV3SeoSummaryAsyncFormRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - enable_crosslinks : typing.Optional[bool] - - seed : typing.Optional[int] - - avoid_repetition : typing.Optional[bool] - - num_outputs : 
typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3SeoSummaryAsyncFormRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SeoSummaryPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3seo_summary_async_form( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async/form", - method="POST", - data={ - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return 
typing.cast( - SeoSummaryPageStatusResponse, - parse_obj_as( - type_=SeoSummaryPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3smart_gpt_async_form( - self, - *, - input_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - cot_prompt: typing.Optional[str] = None, - reflexion_prompt: typing.Optional[str] = None, - dera_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3SmartGptAsyncFormRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3SmartGptAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SmartGptPageStatusResponse: - """ - Parameters - ---------- - input_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - cot_prompt : typing.Optional[str] - - reflexion_prompt : typing.Optional[str] - - dera_prompt : typing.Optional[str] - - selected_model : typing.Optional[PostV3SmartGptAsyncFormRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : 
typing.Optional[PostV3SmartGptAsyncFormRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SmartGptPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3smart_gpt_async_form( - input_prompt="input_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "cot_prompt": cot_prompt, - "reflexion_prompt": reflexion_prompt, - "dera_prompt": dera_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - SmartGptPageStatusResponse, - parse_obj_as( - type_=SmartGptPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3social_lookup_email_async_form( - self, - *, - email_address: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3SocialLookupEmailAsyncFormRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: 
typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3SocialLookupEmailAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SocialLookupEmailPageStatusResponse: - """ - Parameters - ---------- - email_address : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_model : typing.Optional[PostV3SocialLookupEmailAsyncFormRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3SocialLookupEmailAsyncFormRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SocialLookupEmailPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3social_lookup_email_async_form( - email_address="email_address", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "input_prompt": input_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - SocialLookupEmailPageStatusResponse, - parse_obj_as( - type_=SocialLookupEmailPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3text_to_speech_async_form( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - tts_provider: typing.Optional[PostV3TextToSpeechAsyncFormRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: 
typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> TextToSpeechPageStatusResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[PostV3TextToSpeechAsyncFormRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName] - - openai_tts_model : 
typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - TextToSpeechPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3text_to_speech_async_form( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - TextToSpeechPageStatusResponse, - parse_obj_as( - type_=TextToSpeechPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def 
post_v3art_qr_code_async_form( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - qr_code_data: typing.Optional[str] = None, - qr_code_input_image: typing.Optional[str] = None, - qr_code_vcard: typing.Optional[Vcard] = None, - qr_code_file: typing.Optional[str] = None, - use_url_shortener: typing.Optional[bool] = None, - negative_prompt: typing.Optional[str] = None, - image_prompt: typing.Optional[str] = None, - image_prompt_controlnet_models: typing.Optional[ - typing.List[PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem] - ] = None, - image_prompt_strength: typing.Optional[float] = None, - image_prompt_scale: typing.Optional[float] = None, - image_prompt_pos_x: typing.Optional[float] = None, - image_prompt_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[PostV3ArtQrCodeAsyncFormRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[ - typing.List[PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem] - ] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - scheduler: typing.Optional[PostV3ArtQrCodeAsyncFormRequestScheduler] = None, - seed: typing.Optional[int] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> QrCodeGeneratorPageStatusResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - qr_code_data : typing.Optional[str] - - qr_code_input_image : typing.Optional[str] - - qr_code_vcard : typing.Optional[Vcard] - - qr_code_file : typing.Optional[str] - - use_url_shortener : typing.Optional[bool] - - negative_prompt : typing.Optional[str] - - image_prompt : typing.Optional[str] - - image_prompt_controlnet_models : typing.Optional[typing.List[PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem]] - - image_prompt_strength : typing.Optional[float] - - image_prompt_scale : typing.Optional[float] - - image_prompt_pos_x : typing.Optional[float] - - image_prompt_pos_y : typing.Optional[float] - - selected_model : typing.Optional[PostV3ArtQrCodeAsyncFormRequestSelectedModel] - - selected_controlnet_model : typing.Optional[typing.List[PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem]] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.List[float]] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - scheduler : typing.Optional[PostV3ArtQrCodeAsyncFormRequestScheduler] - - seed : typing.Optional[int] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - QrCodeGeneratorPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3art_qr_code_async_form( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_input_image": qr_code_input_image, - "qr_code_vcard": qr_code_vcard, - "qr_code_file": qr_code_file, - "use_url_shortener": use_url_shortener, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": image_prompt_pos_y, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "num_outputs": num_outputs, - "quality": quality, - "scheduler": scheduler, - "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - QrCodeGeneratorPageStatusResponse, - parse_obj_as( - type_=QrCodeGeneratorPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3asr_async_form( - self, - *, - documents: typing.List[str], - 
functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[PostV3AsrAsyncFormRequestSelectedModel] = None, - language: typing.Optional[str] = None, - translation_model: typing.Optional[PostV3AsrAsyncFormRequestTranslationModel] = None, - output_format: typing.Optional[PostV3AsrAsyncFormRequestOutputFormat] = None, - google_translate_target: typing.Optional[str] = None, - translation_source: typing.Optional[str] = None, - translation_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsrPageStatusResponse: - """ - Parameters - ---------- - documents : typing.List[str] - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[PostV3AsrAsyncFormRequestSelectedModel] - - language : typing.Optional[str] - - translation_model : typing.Optional[PostV3AsrAsyncFormRequestTranslationModel] - - output_format : typing.Optional[PostV3AsrAsyncFormRequestOutputFormat] - - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
- - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsrPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3asr_async_form( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/asr/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "documents": documents, - "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - AsrPageStatusResponse, - parse_obj_as( - type_=AsrPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3bulk_eval_async_form( - self, - *, - documents: typing.List[str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - eval_prompts: typing.Optional[typing.List[EvalPrompt]] = None, - agg_functions: typing.Optional[typing.List[AggFunction]] = None, - selected_model: typing.Optional[PostV3BulkEvalAsyncFormRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - 
sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3BulkEvalAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> BulkEvalPageStatusResponse: - """ - Parameters - ---------- - documents : typing.List[str] - - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_prompts : typing.Optional[typing.List[EvalPrompt]] - - Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. - _The `columns` dictionary can be used to reference the spreadsheet columns._ - - - agg_functions : typing.Optional[typing.List[AggFunction]] - - Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - - - selected_model : typing.Optional[PostV3BulkEvalAsyncFormRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3BulkEvalAsyncFormRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - BulkEvalPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3bulk_eval_async_form( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "documents": documents, - "eval_prompts": eval_prompts, - "agg_functions": agg_functions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - BulkEvalPageStatusResponse, - parse_obj_as( - type_=BulkEvalPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3bulk_runner_async_form( - self, - *, - documents: typing.List[str], - run_urls: typing.List[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - eval_urls: typing.Optional[typing.List[str]] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> BulkRunnerPageStatusResponse: - """ - Parameters - ---------- - documents : typing.List[str] - - Upload or link to a CSV or google sheet that contains your sample input data. 
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - - run_urls : typing.List[str] - - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - - - input_columns : typing.Dict[str, str] - - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - - - output_columns : typing.Dict[str, str] - - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_urls : typing.Optional[typing.List[str]] - - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - BulkRunnerPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3bulk_runner_async_form( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "documents": documents, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - BulkRunnerPageStatusResponse, - parse_obj_as( - type_=BulkRunnerPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3compare_ai_upscalers_async_form( - self, - *, - scale: int, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_image: typing.Optional[str] = None, - input_video: typing.Optional[str] = None, - selected_models: typing.Optional[ - typing.List[PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem] - ] = None, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> CompareUpscalerPageStatusResponse: - """ - Parameters - ---------- - scale : int - The final upsampling scale of the image - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_image : typing.Optional[str] - Input Image - - input_video : typing.Optional[str] - Input Video - - selected_models : typing.Optional[typing.List[PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem]] - - selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CompareUpscalerPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3compare_ai_upscalers_async_form( - scale=1, - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "input_video": input_video, - "scale": scale, - "selected_models": selected_models, - "selected_bg_model": selected_bg_model, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CompareUpscalerPageStatusResponse, - parse_obj_as( - type_=CompareUpscalerPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3doc_extract_async_form( - self, - *, - documents: typing.List[str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - sheet_url: typing.Optional[str] = None, - selected_asr_model: 
typing.Optional[PostV3DocExtractAsyncFormRequestSelectedAsrModel] = None, - google_translate_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3DocExtractAsyncFormRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3DocExtractAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> DocExtractPageStatusResponse: - """ - Parameters - ---------- - documents : typing.List[str] - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - sheet_url : typing.Optional[str] - - selected_asr_model : typing.Optional[PostV3DocExtractAsyncFormRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
- - task_instructions : typing.Optional[str] - - selected_model : typing.Optional[PostV3DocExtractAsyncFormRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3DocExtractAsyncFormRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocExtractPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3doc_extract_async_form( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-extract/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "documents": documents, - "sheet_url": sheet_url, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "glossary_document": glossary_document, - "task_instructions": task_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DocExtractPageStatusResponse, - parse_obj_as( - type_=DocExtractPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def 
post_v3doc_search_async_form( - self, - *, - search_query: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - keyword_query: typing.Optional[PostV3DocSearchAsyncFormRequestKeywordQuery] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - doc_extract_url: typing.Optional[str] = None, - embedding_model: typing.Optional[PostV3DocSearchAsyncFormRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3DocSearchAsyncFormRequestSelectedModel] = None, - citation_style: typing.Optional[PostV3DocSearchAsyncFormRequestCitationStyle] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3DocSearchAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> DocSearchPageStatusResponse: - """ - Parameters - ---------- - search_query : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[PostV3DocSearchAsyncFormRequestKeywordQuery] - - documents : typing.Optional[typing.List[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : 
typing.Optional[str] - - embedding_model : typing.Optional[PostV3DocSearchAsyncFormRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[PostV3DocSearchAsyncFormRequestSelectedModel] - - citation_style : typing.Optional[PostV3DocSearchAsyncFormRequestCitationStyle] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3DocSearchAsyncFormRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DocSearchPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3doc_search_async_form( - search_query="search_query", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-search/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DocSearchPageStatusResponse, - parse_obj_as( - type_=DocSearchPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3doc_summary_async_form( - self, - *, - documents: typing.List[str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - task_instructions: typing.Optional[str] = None, - merge_instructions: typing.Optional[str] = None, - selected_model: 
typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedModel] = None, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, - selected_asr_model: typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedAsrModel] = None, - google_translate_target: typing.Optional[str] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3DocSummaryAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> DocSummaryPageStatusResponse: - """ - Parameters - ---------- - documents : typing.List[str] - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] - - selected_model : typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedModel] - - chain_type : typing.Optional[typing.Literal["map_reduce"]] - - selected_asr_model : typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3DocSummaryAsyncFormRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DocSummaryPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3doc_summary_async_form( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-summary/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "documents": documents, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, - "selected_model": selected_model, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DocSummaryPageStatusResponse, - parse_obj_as( - type_=DocSummaryPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3embeddings_async_form( - self, - *, - texts: typing.List[str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[PostV3EmbeddingsAsyncFormRequestSelectedModel] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> EmbeddingsPageStatusResponse: - """ - Parameters - ---------- - texts : typing.List[str] - - functions : typing.Optional[typing.List[RecipeFunction]] 
- - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[PostV3EmbeddingsAsyncFormRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EmbeddingsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3embeddings_async_form( - texts=["texts"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/embeddings/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EmbeddingsPageStatusResponse, - parse_obj_as( - type_=EmbeddingsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3functions_async_form( - self, - *, - code: typing.Optional[str] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> FunctionsPageStatusResponse: - """ - Parameters - ---------- - code : typing.Optional[str] - The JS code to be executed. - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used in the code - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - FunctionsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3functions_async_form() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/functions/async/form", - method="POST", - data={ - "code": code, - "variables": variables, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FunctionsPageStatusResponse, - parse_obj_as( - type_=FunctionsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3google_gpt_async_form( - self, - *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3GoogleGptAsyncFormRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[PostV3GoogleGptAsyncFormRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3GoogleGptAsyncFormRequestResponseFormatType] = None, - serp_search_location: 
typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> GoogleGptPageStatusResponse: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[PostV3GoogleGptAsyncFormRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[PostV3GoogleGptAsyncFormRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3GoogleGptAsyncFormRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - GoogleGptPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3google_gpt_async_form( - search_query="search_query", - site_filter="site_filter", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/google-gpt/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": 
scaleserp_search_field, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - GoogleGptPageStatusResponse, - parse_obj_as( - type_=GoogleGptPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3related_qna_maker_doc_async_form( - self, - *, - search_query: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - keyword_query: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - doc_extract_url: typing.Optional[str] = None, - embedding_model: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel] = None, - citation_style: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: 
typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> RelatedQnADocPageStatusResponse: - """ - Parameters - ---------- - search_query : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery] - - documents : typing.Optional[typing.List[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel] - - citation_style : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnADocPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3related_qna_maker_doc_async_form( - search_query="search_query", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - RelatedQnADocPageStatusResponse, - parse_obj_as( - type_=RelatedQnADocPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3related_qna_maker_async_form( - self, - *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - 
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> RelatedQnAPageStatusResponse: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - 
embedding_model : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnAPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3related_qna_maker_async_form( - search_query="search_query", - site_filter="site_filter", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - RelatedQnAPageStatusResponse, - parse_obj_as( - type_=RelatedQnAPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3text2audio_async_form( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - 
negative_prompt: typing.Optional[str] = None, - duration_sec: typing.Optional[float] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - seed: typing.Optional[int] = None, - sd2upscaling: typing.Optional[bool] = None, - selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> Text2AudioPageStatusResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - duration_sec : typing.Optional[float] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.List[typing.Literal["audio_ldm"]]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - Text2AudioPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3text2audio_async_form( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/text2audio/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "duration_sec": duration_sec, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - Text2AudioPageStatusResponse, - parse_obj_as( - type_=Text2AudioPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3translate_async_form( - self, - *, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - texts: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[PostV3TranslateAsyncFormRequestSelectedModel] = None, - translation_source: typing.Optional[str] = None, - translation_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> TranslationPageStatusResponse: - """ - Parameters - ---------- - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - texts : typing.Optional[typing.List[str]] - - selected_model : typing.Optional[PostV3TranslateAsyncFormRequestSelectedModel] - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - TranslationPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3translate_async_form() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/translate/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - TranslationPageStatusResponse, - parse_obj_as( - type_=TranslationPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3video_bots_async_form( - self, - *, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - 
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_prompt: typing.Optional[str] = None, - input_audio: typing.Optional[str] = None, - input_images: typing.Optional[typing.List[str]] = None, - input_documents: typing.Optional[typing.List[str]] = None, - doc_extract_url: typing.Optional[str] = None, - messages: typing.Optional[typing.List[ConversationEntry]] = None, - bot_script: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3VideoBotsAsyncFormRequestSelectedModel] = None, - document_model: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - keyword_instructions: typing.Optional[str] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[PostV3VideoBotsAsyncFormRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[PostV3VideoBotsAsyncFormRequestCitationStyle] = None, - use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[PostV3VideoBotsAsyncFormRequestAsrModel] = None, - asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[PostV3VideoBotsAsyncFormRequestTranslationModel] = None, - user_language: typing.Optional[str] = None, - input_glossary_document: typing.Optional[str] = None, - output_glossary_document: typing.Optional[str] = None, - lipsync_model: typing.Optional[PostV3VideoBotsAsyncFormRequestLipsyncModel] = None, - tools: typing.Optional[typing.List[LlmTools]] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: 
typing.Optional[PostV3VideoBotsAsyncFormRequestResponseFormatType] = None, - tts_provider: typing.Optional[PostV3VideoBotsAsyncFormRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiTtsModel] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> VideoBotsPageStatusResponse: - """ - Parameters - ---------- - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - input_audio : typing.Optional[str] - - input_images : typing.Optional[typing.List[str]] - - input_documents : 
typing.Optional[typing.List[str]] - - doc_extract_url : typing.Optional[str] - Select a workflow to extract text from documents and images. - - messages : typing.Optional[typing.List[ConversationEntry]] - - bot_script : typing.Optional[str] - - selected_model : typing.Optional[PostV3VideoBotsAsyncFormRequestSelectedModel] - - document_model : typing.Optional[str] - When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - keyword_instructions : typing.Optional[str] - - documents : typing.Optional[typing.List[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[PostV3VideoBotsAsyncFormRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - citation_style : typing.Optional[PostV3VideoBotsAsyncFormRequestCitationStyle] - - use_url_shortener : typing.Optional[bool] - - asr_model : typing.Optional[PostV3VideoBotsAsyncFormRequestAsrModel] - Choose a model to transcribe incoming audio messages to text. - - asr_language : typing.Optional[str] - Choose a language to transcribe incoming audio messages to text. - - translation_model : typing.Optional[PostV3VideoBotsAsyncFormRequestTranslationModel] - - user_language : typing.Optional[str] - Choose a language to translate incoming text & audio messages to English and responses back to your selected language. 
Useful for low-resource languages. - - input_glossary_document : typing.Optional[str] - - Translation Glossary for User Langauge -> LLM Language (English) - - - output_glossary_document : typing.Optional[str] - - Translation Glossary for LLM Language (English) -> User Langauge - - - lipsync_model : typing.Optional[PostV3VideoBotsAsyncFormRequestLipsyncModel] - - tools : typing.Optional[typing.List[LlmTools]] - Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3VideoBotsAsyncFormRequestResponseFormatType] - - tts_provider : typing.Optional[PostV3VideoBotsAsyncFormRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : 
typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - VideoBotsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3video_bots_async_form() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/video-bots/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "input_audio": input_audio, - "input_images": input_images, - "input_documents": input_documents, - "doc_extract_url": doc_extract_url, - "messages": messages, - "bot_script": bot_script, - "selected_model": selected_model, - "document_model": document_model, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "keyword_instructions": keyword_instructions, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "citation_style": citation_style, - "use_url_shortener": use_url_shortener, - "asr_model": asr_model, - "asr_language": asr_language, - "translation_model": translation_model, - "user_language": user_language, - "input_glossary_document": input_glossary_document, - "output_glossary_document": output_glossary_document, - "lipsync_model": lipsync_model, - "tools": tools, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - 
"uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - VideoBotsPageStatusResponse, - parse_obj_as( - type_=VideoBotsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncGooey: - """ - Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. - - Parameters - ---------- - base_url : typing.Optional[str] - The base url to use for requests from the client. - - environment : GooeyEnvironment - The environment to use for requests from the client. 
from .environment import GooeyEnvironment - - - - Defaults to GooeyEnvironment.DEFAULT - - - - api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]] - timeout : typing.Optional[float] - The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. - - follow_redirects : typing.Optional[bool] - Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. - - httpx_client : typing.Optional[httpx.AsyncClient] - The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. - - Examples - -------- - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - """ - - def __init__( - self, - *, - base_url: typing.Optional[str] = None, - environment: GooeyEnvironment = GooeyEnvironment.DEFAULT, - api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"), - timeout: typing.Optional[float] = None, - follow_redirects: typing.Optional[bool] = True, - httpx_client: typing.Optional[httpx.AsyncClient] = None, - ): - _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None - if api_key is None: - raise ApiError(body="The client must be instantiated be either passing in api_key or setting GOOEY_API_KEY") - self._client_wrapper = AsyncClientWrapper( - base_url=_get_base_url(base_url=base_url, environment=environment), - api_key=api_key, - httpx_client=httpx_client - if httpx_client is not None - else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) - if follow_redirects is not None - else httpx.AsyncClient(timeout=_defaulted_timeout), - timeout=_defaulted_timeout, - ) - self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper) - 
self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) - self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper) - self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper) - self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper) - self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper) - self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper) - - async def animate( - self, - *, - animation_prompts: typing.Sequence[AnimationPrompt], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, - animation_mode: typing.Optional[str] = OMIT, - zoom: typing.Optional[str] = OMIT, - translation_x: typing.Optional[str] = OMIT, - translation_y: typing.Optional[str] = OMIT, - rotation3d_x: typing.Optional[str] = OMIT, - rotation3d_y: typing.Optional[str] = OMIT, - rotation3d_z: typing.Optional[str] = OMIT, - fps: typing.Optional[int] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DeforumSdPageOutput]: - """ - Parameters - ---------- - animation_prompts : typing.Sequence[AnimationPrompt] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - max_frames : typing.Optional[int] - - selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] - - animation_mode : typing.Optional[str] - - zoom : typing.Optional[str] - - translation_x : 
typing.Optional[str] - - translation_y : typing.Optional[str] - - rotation3d_x : typing.Optional[str] - - rotation3d_y : typing.Optional[str] - - rotation3d_z : typing.Optional[str] - - fps : typing.Optional[int] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[DeforumSdPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AnimationPrompt, AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.animate( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, - "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - DeforumSdPageStatusResponse, - parse_obj_as( - type_=DeforumSdPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - 
HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def qr_code( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - qr_code_data: typing.Optional[str] = OMIT, - qr_code_input_image: typing.Optional[str] = OMIT, - qr_code_vcard: typing.Optional[Vcard] = OMIT, - qr_code_file: typing.Optional[str] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - image_prompt: typing.Optional[str] = OMIT, - image_prompt_controlnet_models: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] - ] = OMIT, - image_prompt_strength: typing.Optional[float] = OMIT, - image_prompt_scale: typing.Optional[float] = OMIT, - image_prompt_pos_x: typing.Optional[float] = OMIT, - image_prompt_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] - ] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - 
scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT, - seed: typing.Optional[int] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[QrCodeGeneratorPageOutput]: - """ - Parameters - ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - qr_code_data : typing.Optional[str] - - qr_code_input_image : typing.Optional[str] - - qr_code_vcard : typing.Optional[Vcard] - - qr_code_file : typing.Optional[str] - - use_url_shortener : typing.Optional[bool] - - negative_prompt : typing.Optional[str] - - image_prompt : typing.Optional[str] - - image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]] - - image_prompt_strength : typing.Optional[float] - - image_prompt_scale : typing.Optional[float] - - image_prompt_pos_x : typing.Optional[float] - - image_prompt_pos_y : typing.Optional[float] - - selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler] - - seed : typing.Optional[int] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - 
obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[QrCodeGeneratorPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.qr_code( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_input_image": qr_code_input_image, - "qr_code_vcard": qr_code_vcard, - "qr_code_file": qr_code_file, - "use_url_shortener": use_url_shortener, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": image_prompt_pos_y, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "num_outputs": num_outputs, - "quality": quality, - "scheduler": scheduler, - "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - QrCodeGeneratorPageStatusResponse, - parse_obj_as( - type_=QrCodeGeneratorPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - 
return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def seo_people_also_ask( - self, - *, - search_query: str, - site_filter: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, - 
serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[RelatedQnAPageOutput]: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[RelatedQnAPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.seo_people_also_ask( - search_query="search_query", - site_filter="site_filter", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": 
serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - RelatedQnAPageStatusResponse, - parse_obj_as( - type_=RelatedQnAPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def seo_content( - self, - *, - search_query: str, - keywords: str, - title: str, - company_url: str, - example_id: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = OMIT, - enable_html: typing.Optional[bool] = OMIT, - selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - enable_crosslinks: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - 
sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[SeoSummaryPageOutput]: - """ - Parameters - ---------- - search_query : str - - keywords : str - - title : str - - company_url : str - - example_id : typing.Optional[str] - - task_instructions : typing.Optional[str] - - enable_html : typing.Optional[bool] - - selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - enable_crosslinks : typing.Optional[bool] - - seed : typing.Optional[int] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[SeoSummaryPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.seo_content( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - SeoSummaryPageStatusResponse, - parse_obj_as( - type_=SeoSummaryPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - 
object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def web_search_llm( - self, - *, - search_query: str, - site_filter: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[GoogleGptPageOutput]: - """ - Parameters - ---------- - 
search_query : str - - site_filter : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[GoogleGptPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.web_search_llm( - search_query="search_query", - site_filter="site_filter", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/google-gpt/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - GoogleGptPageStatusResponse, - parse_obj_as( - type_=GoogleGptPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise 
UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def personalize_email( - self, - *, - email_address: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[SocialLookupEmailPageOutput]: - """ - Parameters - ---------- - email_address : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : 
typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[SocialLookupEmailPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.personalize_email( - email_address="email_address", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "input_prompt": input_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - SocialLookupEmailPageStatusResponse, - parse_obj_as( - type_=SocialLookupEmailPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) 
- if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def bulk_run( - self, - *, - documents: typing.Sequence[str], - run_urls: typing.Sequence[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[BulkRunnerPageOutput]: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - - run_urls : typing.Sequence[str] - - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - - - input_columns : typing.Dict[str, str] - - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - - - output_columns : typing.Dict[str, str] - - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. 
- - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_urls : typing.Optional[typing.Sequence[str]] - - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[BulkRunnerPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.bulk_run( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - BulkRunnerPageStatusResponse, - parse_obj_as( - type_=BulkRunnerPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - 
parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def synthesize_data( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - sheet_url: typing.Optional[str] = OMIT, - selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DocExtractPageOutput]: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - sheet_url : typing.Optional[str] - - 
selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - task_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocExtractPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[DocExtractPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.synthesize_data( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-extract/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "sheet_url": sheet_url, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "glossary_document": glossary_document, - "task_instructions": task_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - DocExtractPageStatusResponse, - parse_obj_as( - type_=DocExtractPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - 
) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def llm( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[CompareLlmPageOutput]: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[CompareLlmPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.llm() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "selected_models": selected_models, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - CompareLlmPageStatusResponse, - parse_obj_as( - type_=CompareLlmPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def rag( - self, - 
*, - search_query: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DocSearchPageOutput]: - """ - Parameters - ---------- - search_query : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : 
typing.Optional[str] - - embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSearchPageRequestSelectedModel] - - citation_style : typing.Optional[DocSearchPageRequestCitationStyle] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[DocSearchPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.rag( - search_query="search_query", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-search/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - DocSearchPageStatusResponse, - parse_obj_as( - type_=DocSearchPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if 
_response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def doc_summary( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - merge_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, - selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DocSummaryPageOutput]: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] - - selected_model : 
typing.Optional[DocSummaryPageRequestSelectedModel] - - chain_type : typing.Optional[typing.Literal["map_reduce"]] - - selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[DocSummaryPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.doc_summary( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-summary/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, - "selected_model": selected_model, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - DocSummaryPageStatusResponse, - parse_obj_as( - type_=DocSummaryPageStatusResponse, # type: ignore - object_=_response.json(), 
- ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def lipsync_tts( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: 
typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[LipsyncTtsPageOutput]: - """ - Parameters - ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] - - openai_tts_model : 
typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[LipsyncTtsPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.lipsync_tts( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - 
"face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - LipsyncTtsPageStatusResponse, - parse_obj_as( - type_=LipsyncTtsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def text_to_speech( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - 
bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[TextToSpeechPageOutput]: - """ - Parameters - ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - 
openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[TextToSpeechPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.text_to_speech( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - TextToSpeechPageStatusResponse, - parse_obj_as( - type_=TextToSpeechPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return 
_parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def speech_recognition( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, - language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, - output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[AsrPageOutput]: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[AsrPageRequestSelectedModel] - - language : typing.Optional[str] - - translation_model : typing.Optional[AsrPageRequestTranslationModel] - - output_format : typing.Optional[AsrPageRequestOutputFormat] - - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[AsrPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.speech_recognition( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/asr/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - AsrPageStatusResponse, - parse_obj_as( - type_=AsrPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) - - async def text_to_music( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - duration_sec: typing.Optional[float] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[Text2AudioPageOutput]: - """ - Parameters - ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - duration_sec : typing.Optional[float] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[Text2AudioPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.text_to_music( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/text2audio/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "duration_sec": duration_sec, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - Text2AudioPageStatusResponse, - parse_obj_as( - type_=Text2AudioPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, 
body=_response_json) - - async def translate( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - texts: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[TranslationPageOutput]: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - texts : typing.Optional[typing.Sequence[str]] - - selected_model : typing.Optional[TranslationPageRequestSelectedModel] - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[TranslationPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.translate() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/translate/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - TranslationPageStatusResponse, - parse_obj_as( - type_=TranslationPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def remix_image( - self, - *, - input_image: str, - example_id: typing.Optional[str] = None, - functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - text_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[Img2ImgPageOutput]: - """ - Parameters - ---------- - input_image : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - text_prompt : typing.Optional[str] - - selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : 
typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[Img2ImgPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.remix_image( - input_image="input_image", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/Img2Img/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - Img2ImgPageStatusResponse, - parse_obj_as( - type_=Img2ImgPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - 
typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def text_to_image( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - dall_e3quality: typing.Optional[str] = OMIT, - dall_e3style: typing.Optional[str] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, - edit_instruction: typing.Optional[str] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[CompareText2ImgPageOutput]: - """ - Parameters - ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : 
typing.Optional[int] - - dall_e3quality : typing.Optional[str] - - dall_e3style : typing.Optional[str] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - - scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] - - edit_instruction : typing.Optional[str] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[CompareText2ImgPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.text_to_image( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, - "num_outputs": num_outputs, - "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - CompareText2ImgPageStatusResponse, - parse_obj_as( - type_=CompareText2ImgPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return 
_parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def product_image( - self, - *, - input_image: str, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[ObjectInpaintingPageOutput]: - """ - Parameters - ---------- - input_image : str - - text_prompt : str 
- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - mask_threshold : typing.Optional[float] - - selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[ObjectInpaintingPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.product_image( - input_image="input_image", - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - 
"settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - ObjectInpaintingPageStatusResponse, - parse_obj_as( - type_=ObjectInpaintingPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def portrait( - self, - *, - input_image: str, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - 
seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[FaceInpaintingPageOutput]: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[FaceInpaintingPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.portrait( - input_image="input_image", - text_prompt="tony stark from the iron man", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - FaceInpaintingPageStatusResponse, - parse_obj_as( - type_=FaceInpaintingPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) 
- _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def image_from_email( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - email_address: typing.Optional[str] = OMIT, - twitter_handle: typing.Optional[str] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - should_send_email: typing.Optional[bool] = OMIT, - email_from: typing.Optional[str] = OMIT, - email_cc: typing.Optional[str] = OMIT, - email_bcc: typing.Optional[str] = OMIT, - email_subject: typing.Optional[str] = OMIT, - email_body: typing.Optional[str] = OMIT, - email_body_enable_html: typing.Optional[bool] = OMIT, - fallback_email_body: typing.Optional[str] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[EmailFaceInpaintingPageOutput]: - """ - Parameters - ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - 
email_address : typing.Optional[str] - - twitter_handle : typing.Optional[str] - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - should_send_email : typing.Optional[bool] - - email_from : typing.Optional[str] - - email_cc : typing.Optional[str] - - email_bcc : typing.Optional[str] - - email_subject : typing.Optional[str] - - email_body : typing.Optional[str] - - email_body_enable_html : typing.Optional[bool] - - fallback_email_body : typing.Optional[str] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[EmailFaceInpaintingPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.image_from_email( - email_address="sean@dara.network", - text_prompt="winter's day in paris", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, - "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - EmailFaceInpaintingPageStatusResponse, - parse_obj_as( - type_=EmailFaceInpaintingPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise 
UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def image_from_web_search( - self, - *, - search_query: str, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[GoogleImageGenPageOutput]: - """ - Parameters - ---------- - search_query : str - - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : 
typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[GoogleImageGenPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.image_from_web_search( - search_query="search_query", - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, - "text_prompt": text_prompt, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - GoogleImageGenPageStatusResponse, - parse_obj_as( - type_=GoogleImageGenPageStatusResponse, # 
type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def remove_background( - self, - *, - input_image: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - rect_persepective_transform: typing.Optional[bool] = OMIT, - reflection_opacity: typing.Optional[float] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[ImageSegmentationPageOutput]: - """ - Parameters - ---------- - input_image : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja 
prompt templates and in functions as arguments - - selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] - - mask_threshold : typing.Optional[float] - - rect_persepective_transform : typing.Optional[bool] - - reflection_opacity : typing.Optional[float] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[ImageSegmentationPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.remove_background( - input_image="input_image", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - ImageSegmentationPageStatusResponse, - parse_obj_as( - type_=ImageSegmentationPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise 
UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def upscale( - self, - *, - scale: int, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_image: typing.Optional[str] = OMIT, - input_video: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[CompareUpscalerPageOutput]: - """ - Parameters - ---------- - scale : int - The final upsampling scale of the image - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_image : typing.Optional[str] - Input Image - - input_video : typing.Optional[str] - Input Video - - selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] - - selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - 
Request-specific configuration. - - Returns - ------- - typing.Optional[CompareUpscalerPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.upscale( - scale=1, - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "input_video": input_video, - "scale": scale, - "selected_models": selected_models, - "selected_bg_model": selected_bg_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - CompareUpscalerPageStatusResponse, - parse_obj_as( - type_=CompareUpscalerPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def embed( - self, - *, - texts: typing.Sequence[str], - example_id: 
typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[EmbeddingsPageOutput]: - """ - Parameters - ---------- - texts : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[EmbeddingsPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.embed( - texts=["texts"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/embeddings/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - EmbeddingsPageStatusResponse, - parse_obj_as( - type_=EmbeddingsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - 
type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def seo_people_also_ask_doc( - self, - *, - search_query: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: 
typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[RelatedQnADocPageOutput]: - """ - Parameters - ---------- - search_query : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] - - citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[RelatedQnADocPageOutput] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.seo_people_also_ask_doc( - search_query="search_query", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": 
selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - RelatedQnADocPageStatusResponse, - parse_obj_as( - type_=RelatedQnADocPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def health_status_get( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[typing.Any] - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.health_status_get() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "status", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def post_v3chyron_plant_async_form( - self, - *, - midi_notes: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - midi_notes_prompt: typing.Optional[str] = None, - chyron_prompt: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ChyronPlantPageStatusResponse: - """ - Parameters - ---------- - midi_notes : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - midi_notes_prompt : typing.Optional[str] - - chyron_prompt : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ChyronPlantPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.post_v3chyron_plant_async_form( - midi_notes="midi_notes", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "midi_notes": midi_notes, - "midi_notes_prompt": midi_notes_prompt, - "chyron_prompt": chyron_prompt, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ChyronPlantPageStatusResponse, - parse_obj_as( - type_=ChyronPlantPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def post_v3compare_llm_async_form( - self, - *, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_prompt: typing.Optional[str] = None, - selected_models: typing.Optional[typing.List[PostV3CompareLlmAsyncFormRequestSelectedModelsItem]] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3CompareLlmAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> CompareLlmPageStatusResponse: - """ - Parameters - 
---------- - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_models : typing.Optional[typing.List[PostV3CompareLlmAsyncFormRequestSelectedModelsItem]] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[PostV3CompareLlmAsyncFormRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CompareLlmPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.post_v3compare_llm_async_form() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "selected_models": selected_models, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CompareLlmPageStatusResponse, - parse_obj_as( - type_=CompareLlmPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, 
body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def post_v3compare_text2img_async_form( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - negative_prompt: typing.Optional[str] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - dall_e3quality: typing.Optional[str] = None, - dall_e3style: typing.Optional[str] = None, - guidance_scale: typing.Optional[float] = None, - seed: typing.Optional[int] = None, - sd2upscaling: typing.Optional[bool] = None, - selected_models: typing.Optional[typing.List[PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem]] = None, - scheduler: typing.Optional[PostV3CompareText2ImgAsyncFormRequestScheduler] = None, - edit_instruction: typing.Optional[str] = None, - image_guidance_scale: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> CompareText2ImgPageStatusResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - dall_e3quality : typing.Optional[str] - - dall_e3style : typing.Optional[str] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : 
typing.Optional[typing.List[PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem]] - - scheduler : typing.Optional[PostV3CompareText2ImgAsyncFormRequestScheduler] - - edit_instruction : typing.Optional[str] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CompareText2ImgPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.post_v3compare_text2img_async_form( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, - "num_outputs": num_outputs, - "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CompareText2ImgPageStatusResponse, - parse_obj_as( - type_=CompareText2ImgPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def post_v3deforum_sd_async_form( - self, - *, - 
animation_prompts: typing.List[AnimationPrompt], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - max_frames: typing.Optional[int] = None, - selected_model: typing.Optional[PostV3DeforumSdAsyncFormRequestSelectedModel] = None, - animation_mode: typing.Optional[str] = None, - zoom: typing.Optional[str] = None, - translation_x: typing.Optional[str] = None, - translation_y: typing.Optional[str] = None, - rotation3d_x: typing.Optional[str] = None, - rotation3d_y: typing.Optional[str] = None, - rotation3d_z: typing.Optional[str] = None, - fps: typing.Optional[int] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> DeforumSdPageStatusResponse: - """ - Parameters - ---------- - animation_prompts : typing.List[AnimationPrompt] - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - max_frames : typing.Optional[int] - - selected_model : typing.Optional[PostV3DeforumSdAsyncFormRequestSelectedModel] - - animation_mode : typing.Optional[str] - - zoom : typing.Optional[str] - - translation_x : typing.Optional[str] - - translation_y : typing.Optional[str] - - rotation3d_x : typing.Optional[str] - - rotation3d_y : typing.Optional[str] - - rotation3d_z : typing.Optional[str] - - fps : typing.Optional[int] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DeforumSdPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AnimationPrompt, AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.post_v3deforum_sd_async_form( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), ) - ], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, - "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, - "seed": seed, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DeforumSdPageStatusResponse, - parse_obj_as( - type_=DeforumSdPageStatusResponse, # type: ignore - object_=_response.json(), - ), ) _response_json = _response.json() except JSONDecodeError: raise 
ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3email_face_inpainting_async_form( + async def qr_code( self, *, text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - email_address: typing.Optional[str] = None, - twitter_handle: typing.Optional[str] = None, - face_scale: typing.Optional[float] = None, - face_pos_x: typing.Optional[float] = None, - face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - upscale_factor: typing.Optional[float] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - should_send_email: typing.Optional[bool] = None, - email_from: typing.Optional[str] = None, - email_cc: typing.Optional[str] = None, - email_bcc: typing.Optional[str] = None, - email_subject: typing.Optional[str] = None, - email_body: typing.Optional[str] = None, - email_body_enable_html: typing.Optional[bool] = None, - fallback_email_body: typing.Optional[str] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + qr_code_data: typing.Optional[str] = OMIT, + qr_code_input_image: typing.Optional[str] = OMIT, + qr_code_vcard: typing.Optional[Vcard] = OMIT, + qr_code_file: typing.Optional[str] = OMIT, + use_url_shortener: typing.Optional[bool] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + image_prompt: 
typing.Optional[str] = OMIT, + image_prompt_controlnet_models: typing.Optional[ + typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] + ] = OMIT, + image_prompt_strength: typing.Optional[float] = OMIT, + image_prompt_scale: typing.Optional[float] = OMIT, + image_prompt_pos_x: typing.Optional[float] = OMIT, + image_prompt_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT, + selected_controlnet_model: typing.Optional[ + typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] + ] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT, + seed: typing.Optional[int] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> EmailFaceInpaintingPageStatusResponse: + ) -> typing.Optional[QrCodeGeneratorPageOutput]: """ Parameters ---------- text_prompt : str - functions : typing.Optional[typing.List[RecipeFunction]] + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - email_address : typing.Optional[str] - - twitter_handle : typing.Optional[str] + qr_code_data : typing.Optional[str] - face_scale : typing.Optional[float] + qr_code_input_image : typing.Optional[str] - face_pos_x : typing.Optional[float] + qr_code_vcard : typing.Optional[Vcard] - face_pos_y 
: typing.Optional[float] + qr_code_file : typing.Optional[str] - selected_model : typing.Optional[PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel] + use_url_shortener : typing.Optional[bool] negative_prompt : typing.Optional[str] - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - should_send_email : typing.Optional[bool] - - email_from : typing.Optional[str] - - email_cc : typing.Optional[str] - - email_bcc : typing.Optional[str] - - email_subject : typing.Optional[str] - - email_body : typing.Optional[str] - - email_body_enable_html : typing.Optional[bool] - - fallback_email_body : typing.Optional[str] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EmailFaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.post_v3email_face_inpainting_async_form( - text_prompt="text_prompt", - ) + image_prompt : typing.Optional[str] + image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - 
"upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, - "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, - "seed": seed, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EmailFaceInpaintingPageStatusResponse, - parse_obj_as( - type_=EmailFaceInpaintingPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + image_prompt_strength : typing.Optional[float] - async def post_v3face_inpainting_async_form( - self, - *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - face_scale: typing.Optional[float] = None, - face_pos_x: typing.Optional[float] = None, - face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[PostV3FaceInpaintingAsyncFormRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - upscale_factor: typing.Optional[float] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> FaceInpaintingPageStatusResponse: - """ 
- Parameters - ---------- - input_image : str + image_prompt_scale : typing.Optional[float] - text_prompt : str + image_prompt_pos_x : typing.Optional[float] - functions : typing.Optional[typing.List[RecipeFunction]] + image_prompt_pos_y : typing.Optional[float] - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments + selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel] - face_scale : typing.Optional[float] + selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] - face_pos_x : typing.Optional[float] + output_width : typing.Optional[int] - face_pos_y : typing.Optional[float] + output_height : typing.Optional[int] - selected_model : typing.Optional[PostV3FaceInpaintingAsyncFormRequestSelectedModel] + guidance_scale : typing.Optional[float] - negative_prompt : typing.Optional[str] + controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] num_outputs : typing.Optional[int] quality : typing.Optional[int] - upscale_factor : typing.Optional[float] + scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler] - output_width : typing.Optional[int] + seed : typing.Optional[int] - output_height : typing.Optional[int] + obj_scale : typing.Optional[float] - guidance_scale : typing.Optional[float] + obj_pos_x : typing.Optional[float] - seed : typing.Optional[int] + obj_pos_y : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -13671,7 +4704,7 @@ async def post_v3face_inpainting_async_form( Returns ------- - FaceInpaintingPageStatusResponse + typing.Optional[QrCodeGeneratorPageOutput] Successful Response Examples @@ -13686,8 +4719,7 @@ async def post_v3face_inpainting_async_form( async def main() -> None: - await client.post_v3face_inpainting_async_form( - input_image="input_image", + await client.qr_code( text_prompt="text_prompt", ) @@ -13695,209 +4727,177 @@ 
async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async/form", + "v3/art-qr-code/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "input_image": input_image, + "qr_code_data": qr_code_data, + "qr_code_input_image": qr_code_input_image, + "qr_code_vcard": qr_code_vcard, + "qr_code_file": qr_code_file, + "use_url_shortener": use_url_shortener, "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, + "image_prompt": image_prompt, + "image_prompt_controlnet_models": image_prompt_controlnet_models, + "image_prompt_strength": image_prompt_strength, + "image_prompt_scale": image_prompt_scale, + "image_prompt_pos_x": image_prompt_pos_x, + "image_prompt_pos_y": image_prompt_pos_y, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, "output_width": output_width, "output_height": output_height, "guidance_scale": guidance_scale, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "num_outputs": num_outputs, + "quality": quality, + "scheduler": scheduler, "seed": seed, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - FaceInpaintingPageStatusResponse, + _parsed_response = typing.cast( + QrCodeGeneratorPageStatusResponse, parse_obj_as( - type_=FaceInpaintingPageStatusResponse, # type: ignore + type_=QrCodeGeneratorPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise 
PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3google_image_gen_async_form( + async def seo_people_also_ask( self, *, search_query: str, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[PostV3GoogleImageGenAsyncFormRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - prompt_strength: typing.Optional[float] = None, - sd2upscaling: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - image_guidance_scale: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, + site_filter: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + 
query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> GoogleImageGenPageStatusResponse: + ) -> typing.Optional[RelatedQnAPageOutput]: """ Parameters ---------- search_query : str - text_prompt : str + site_filter : str - functions : typing.Optional[typing.List[RecipeFunction]] + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead - - selected_model : typing.Optional[PostV3GoogleImageGenAsyncFormRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - 
guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] + task_instructions : typing.Optional[str] - settings : typing.Optional[RunSettings] + query_instructions : typing.Optional[str] - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] - Returns - ------- - GoogleImageGenPageStatusResponse - Successful Response + max_search_urls : typing.Optional[int] - Examples - -------- - import asyncio + max_references : typing.Optional[int] - from gooey import AsyncGooey + max_context_words : typing.Optional[int] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + scroll_jump : typing.Optional[int] + embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] - async def main() -> None: - await client.post_v3google_image_gen_async_form( - search_query="search_query", - text_prompt="text_prompt", - ) + dense_weight : typing.Optional[float] + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, - "text_prompt": text_prompt, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - GoogleImageGenPageStatusResponse, - parse_obj_as( - type_=GoogleImageGenPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3image_segmentation_async_form( - self, - *, - input_image: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[PostV3ImageSegmentationAsyncFormRequestSelectedModel] = None, - mask_threshold: typing.Optional[float] = None, - rect_persepective_transform: typing.Optional[bool] = None, - reflection_opacity: typing.Optional[float] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ImageSegmentationPageStatusResponse: - """ - Parameters - ---------- - input_image : 
str + avoid_repetition : typing.Optional[bool] - functions : typing.Optional[typing.List[RecipeFunction]] + num_outputs : typing.Optional[int] - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments + quality : typing.Optional[float] - selected_model : typing.Optional[PostV3ImageSegmentationAsyncFormRequestSelectedModel] + max_tokens : typing.Optional[int] - mask_threshold : typing.Optional[float] + sampling_temperature : typing.Optional[float] - rect_persepective_transform : typing.Optional[bool] + response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] - reflection_opacity : typing.Optional[float] + serp_search_location : typing.Optional[SerpSearchLocation] - obj_scale : typing.Optional[float] + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead - obj_pos_x : typing.Optional[float] + serp_search_type : typing.Optional[SerpSearchType] - obj_pos_y : typing.Optional[float] + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead settings : typing.Optional[RunSettings] @@ -13906,7 +4906,7 @@ async def post_v3image_segmentation_async_form( Returns ------- - ImageSegmentationPageStatusResponse + typing.Optional[RelatedQnAPageOutput] Successful Response Examples @@ -13921,104 +4921,167 @@ async def post_v3image_segmentation_async_form( async def main() -> None: - await client.post_v3image_segmentation_async_form( - input_image="input_image", + await client.seo_people_also_ask( + search_query="search_query", + site_filter="site_filter", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async/form", + "v3/related-qna-maker/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "input_image": input_image, + 
"search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - ImageSegmentationPageStatusResponse, + _parsed_response = typing.cast( + RelatedQnAPageStatusResponse, parse_obj_as( - type_=ImageSegmentationPageStatusResponse, # type: ignore + type_=RelatedQnAPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + 
parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3img2img_async_form( + async def seo_content( self, *, - input_image: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - text_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedControlnetModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - prompt_strength: typing.Optional[float] = None, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, - seed: typing.Optional[int] = None, - image_guidance_scale: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, + search_query: str, + keywords: str, + title: str, + company_url: str, + example_id: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = OMIT, + enable_html: typing.Optional[bool] = OMIT, + selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + enable_crosslinks: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + 
response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> Img2ImgPageStatusResponse: + ) -> typing.Optional[SeoSummaryPageOutput]: """ Parameters ---------- - input_image : str + search_query : str - functions : typing.Optional[typing.List[RecipeFunction]] + keywords : str - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments + title : str - text_prompt : typing.Optional[str] + company_url : str + + example_id : typing.Optional[str] - selected_model : typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedModel] + task_instructions : typing.Optional[str] - selected_controlnet_model : typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedControlnetModel] + enable_html : typing.Optional[bool] - negative_prompt : typing.Optional[str] + selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + enable_crosslinks : typing.Optional[bool] + + seed : typing.Optional[int] + + avoid_repetition : typing.Optional[bool] num_outputs : typing.Optional[int] - quality : typing.Optional[int] + quality : typing.Optional[float] - output_width : typing.Optional[int] + max_tokens : typing.Optional[int] - output_height : typing.Optional[int] + sampling_temperature : typing.Optional[float] - guidance_scale : typing.Optional[float] + response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] - prompt_strength : typing.Optional[float] + serp_search_location : typing.Optional[SerpSearchLocation] - controlnet_conditioning_scale : 
typing.Optional[typing.List[float]] + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead - seed : typing.Optional[int] + serp_search_type : typing.Optional[SerpSearchType] - image_guidance_scale : typing.Optional[float] + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead settings : typing.Optional[RunSettings] @@ -14027,7 +5090,7 @@ async def post_v3img2img_async_form( Returns ------- - Img2ImgPageStatusResponse + typing.Optional[SeoSummaryPageOutput] Successful Response Examples @@ -14042,110 +5105,180 @@ async def post_v3img2img_async_form( async def main() -> None: - await client.post_v3img2img_async_form( - input_image="input_image", + await client.seo_content( + search_query="search_query", + keywords="keywords", + title="title", + company_url="company_url", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/Img2Img/async/form", + "v3/SEOSummary/async", method="POST", - data={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, + params={ + "example_id": example_id, + }, + json={ + "search_query": search_query, + "keywords": keywords, + "title": title, + "company_url": company_url, + "task_instructions": task_instructions, + "enable_html": enable_html, "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "negative_prompt": negative_prompt, + "max_search_urls": max_search_urls, + "enable_crosslinks": enable_crosslinks, + "seed": seed, + "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "seed": seed, - "image_guidance_scale": image_guidance_scale, + "max_tokens": max_tokens, + 
"sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - Img2ImgPageStatusResponse, + _parsed_response = typing.cast( + SeoSummaryPageStatusResponse, parse_obj_as( - type_=Img2ImgPageStatusResponse, # type: ignore + type_=SeoSummaryPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3letter_writer_async_form( + async def web_search_llm( self, *, - action_id: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - prompt_header: typing.Optional[str] = None, - example_letters: typing.Optional[typing.List[TrainingDataModel]] = None, - lm_selected_api: typing.Optional[str] = None, - lm_selected_engine: 
typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - lm_sampling_temperature: typing.Optional[float] = None, - api_http_method: typing.Optional[str] = None, - api_url: typing.Optional[str] = None, - api_headers: typing.Optional[str] = None, - api_json_body: typing.Optional[str] = None, - input_prompt: typing.Optional[str] = None, - strip_html2text: typing.Optional[bool] = None, - settings: typing.Optional[RunSettings] = None, + search_query: str, + site_filter: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> LetterWriterPageStatusResponse: + ) -> typing.Optional[GoogleGptPageOutput]: """ 
Parameters ---------- - action_id : str + search_query : str + + site_filter : str + + example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - prompt_header : typing.Optional[str] + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] - example_letters : typing.Optional[typing.List[TrainingDataModel]] + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- lm_selected_api : typing.Optional[str] - lm_selected_engine : typing.Optional[str] + avoid_repetition : typing.Optional[bool] num_outputs : typing.Optional[int] quality : typing.Optional[float] - lm_sampling_temperature : typing.Optional[float] + max_tokens : typing.Optional[int] - api_http_method : typing.Optional[str] + sampling_temperature : typing.Optional[float] - api_url : typing.Optional[str] + response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] - api_headers : typing.Optional[str] + serp_search_location : typing.Optional[SerpSearchLocation] - api_json_body : typing.Optional[str] + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead - input_prompt : typing.Optional[str] + serp_search_type : typing.Optional[SerpSearchType] - strip_html2text : typing.Optional[bool] + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead settings : typing.Optional[RunSettings] @@ -14154,7 +5287,7 @@ async def post_v3letter_writer_async_form( Returns ------- - LetterWriterPageStatusResponse + typing.Optional[GoogleGptPageOutput] Successful Response Examples @@ -14169,92 +5302,139 @@ async def post_v3letter_writer_async_form( async def main() -> None: - await client.post_v3letter_writer_async_form( - action_id="action_id", + await client.web_search_llm( + search_query="search_query", + site_filter="site_filter", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/LetterWriter/async/form", + "v3/google-gpt/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "action_id": action_id, - "prompt_header": prompt_header, - "example_letters": example_letters, - "lm_selected_api": lm_selected_api, - "lm_selected_engine": lm_selected_engine, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + 
"query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, - "lm_sampling_temperature": lm_sampling_temperature, - "api_http_method": api_http_method, - "api_url": api_url, - "api_headers": api_headers, - "api_json_body": api_json_body, - "input_prompt": input_prompt, - "strip_html_2_text": strip_html2text, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - LetterWriterPageStatusResponse, + _parsed_response = typing.cast( + GoogleGptPageStatusResponse, parse_obj_as( - type_=LetterWriterPageStatusResponse, # type: ignore + type_=GoogleGptPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + 
), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3lipsync_async_form( + async def personalize_email( self, *, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[PostV3LipsyncAsyncFormRequestSelectedModel] = None, - input_audio: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, + email_address: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> LipsyncPageStatusResponse: + ) -> typing.Optional[SocialLookupEmailPageOutput]: """ Parameters ---------- - functions : typing.Optional[typing.List[RecipeFunction]] + email_address : str + + example_id : typing.Optional[str] + + functions : 
typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - input_face : typing.Optional[str] + input_prompt : typing.Optional[str] - face_padding_top : typing.Optional[int] + selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] - face_padding_bottom : typing.Optional[int] + avoid_repetition : typing.Optional[bool] - face_padding_left : typing.Optional[int] + num_outputs : typing.Optional[int] - face_padding_right : typing.Optional[int] + quality : typing.Optional[float] - sadtalker_settings : typing.Optional[SadTalkerSettings] + max_tokens : typing.Optional[int] - selected_model : typing.Optional[PostV3LipsyncAsyncFormRequestSelectedModel] + sampling_temperature : typing.Optional[float] - input_audio : typing.Optional[str] + response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -14263,7 +5443,7 @@ async def post_v3lipsync_async_form( Returns ------- - LipsyncPageStatusResponse + typing.Optional[SocialLookupEmailPageOutput] Successful Response Examples @@ -14278,139 +5458,132 @@ async def post_v3lipsync_async_form( async def main() -> None: - await client.post_v3lipsync_async_form() + await client.personalize_email( + email_address="email_address", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/Lipsync/async/form", + "v3/SocialLookupEmail/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, + "email_address": email_address, + "input_prompt": input_prompt, 
"selected_model": selected_model, - "input_audio": input_audio, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - LipsyncPageStatusResponse, + _parsed_response = typing.cast( + SocialLookupEmailPageStatusResponse, parse_obj_as( - type_=LipsyncPageStatusResponse, # type: ignore + type_=SocialLookupEmailPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3lipsync_tts_async_form( + async def bulk_run( self, *, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - tts_provider: typing.Optional[PostV3LipsyncTtsAsyncFormRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - 
google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[PostV3LipsyncTtsAsyncFormRequestSelectedModel] = None, - settings: typing.Optional[RunSettings] = None, + documents: typing.Sequence[str], + run_urls: typing.Sequence[str], + input_columns: typing.Dict[str, str], + output_columns: typing.Dict[str, str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> LipsyncTtsPageStatusResponse: + ) -> typing.Optional[BulkRunnerPageOutput]: """ Parameters ---------- - text_prompt : str - - functions : 
typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[PostV3LipsyncTtsAsyncFormRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead + documents : typing.Sequence[str] - elevenlabs_api_key : typing.Optional[str] + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. + Remember to includes header names in your CSV too. - elevenlabs_voice_id : typing.Optional[str] - elevenlabs_model : typing.Optional[str] + run_urls : typing.Sequence[str] - elevenlabs_stability : typing.Optional[float] + Provide one or more Gooey.AI workflow runs. + You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - elevenlabs_similarity_boost : typing.Optional[float] - elevenlabs_style : typing.Optional[float] + input_columns : typing.Dict[str, str] - elevenlabs_speaker_boost : typing.Optional[bool] + For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. 
- azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName] + output_columns : typing.Dict[str, str] - openai_tts_model : typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel] + For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - input_face : typing.Optional[str] - face_padding_top : typing.Optional[int] + example_id : typing.Optional[str] - face_padding_bottom : typing.Optional[int] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - face_padding_left : typing.Optional[int] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - face_padding_right : typing.Optional[int] + eval_urls : typing.Optional[typing.Sequence[str]] - sadtalker_settings : typing.Optional[SadTalkerSettings] + _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. 
- selected_model : typing.Optional[PostV3LipsyncTtsAsyncFormRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -14419,7 +5592,7 @@ async def post_v3lipsync_tts_async_form( Returns ------- - LipsyncTtsPageStatusResponse + typing.Optional[BulkRunnerPageOutput] Successful Response Examples @@ -14431,128 +5604,142 @@ async def post_v3lipsync_tts_async_form( client = AsyncGooey( api_key="YOUR_API_KEY", ) - - - async def main() -> None: - await client.post_v3lipsync_tts_async_form( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, + + + async def main() -> None: + await client.bulk_run( + documents=["documents"], + run_urls=["run_urls"], + input_columns={"key": "value"}, + 
output_columns={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/bulk-runner/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "run_urls": run_urls, + "input_columns": input_columns, + "output_columns": output_columns, + "eval_urls": eval_urls, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - LipsyncTtsPageStatusResponse, + _parsed_response = typing.cast( + BulkRunnerPageStatusResponse, parse_obj_as( - type_=LipsyncTtsPageStatusResponse, # type: ignore + type_=BulkRunnerPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3object_inpainting_async_form( + async def synthesize_data( self, *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - obj_scale: 
typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - mask_threshold: typing.Optional[float] = None, - selected_model: typing.Optional[PostV3ObjectInpaintingAsyncFormRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - sd2upscaling: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + sheet_url: typing.Optional[str] = OMIT, + selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> ObjectInpaintingPageStatusResponse: + ) -> typing.Optional[DocExtractPageOutput]: """ Parameters ---------- - input_image : str + documents : typing.Sequence[str] - text_prompt : str + example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : 
typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] + sheet_url : typing.Optional[str] - obj_pos_y : typing.Optional[float] + selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] - mask_threshold : typing.Optional[float] + google_translate_target : typing.Optional[str] - selected_model : typing.Optional[PostV3ObjectInpaintingAsyncFormRequestSelectedModel] + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - negative_prompt : typing.Optional[str] + task_instructions : typing.Optional[str] - num_outputs : typing.Optional[int] + selected_model : typing.Optional[DocExtractPageRequestSelectedModel] - quality : typing.Optional[int] + avoid_repetition : typing.Optional[bool] - output_width : typing.Optional[int] + num_outputs : typing.Optional[int] - output_height : typing.Optional[int] + quality : typing.Optional[float] - guidance_scale : typing.Optional[float] + max_tokens : typing.Optional[int] - sd2upscaling : typing.Optional[bool] + sampling_temperature : typing.Optional[float] - seed : typing.Optional[int] + response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -14561,7 +5748,7 @@ async def post_v3object_inpainting_async_form( Returns ------- - ObjectInpaintingPageStatusResponse + typing.Optional[DocExtractPageOutput] Successful Response Examples @@ -14576,103 +5763,115 @@ async def post_v3object_inpainting_async_form( async def main() -> None: - await 
client.post_v3object_inpainting_async_form( - input_image="input_image", - text_prompt="text_prompt", + await client.synthesize_data( + documents=["documents"], ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async/form", + "v3/doc-extract/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, + "documents": documents, + "sheet_url": sheet_url, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "glossary_document": glossary_document, + "task_instructions": task_instructions, "selected_model": selected_model, - "negative_prompt": negative_prompt, + "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - ObjectInpaintingPageStatusResponse, + _parsed_response = typing.cast( + DocExtractPageStatusResponse, parse_obj_as( - type_=ObjectInpaintingPageStatusResponse, # type: ignore + type_=DocExtractPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise 
UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3seo_summary_async_form( + async def llm( self, *, - search_query: str, - keywords: str, - title: str, - company_url: str, - task_instructions: typing.Optional[str] = None, - enable_html: typing.Optional[bool] = None, - selected_model: typing.Optional[PostV3SeoSummaryAsyncFormRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - enable_crosslinks: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3SeoSummaryAsyncFormRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = 
OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> SeoSummaryPageStatusResponse: + ) -> typing.Optional[CompareLlmPageOutput]: """ Parameters ---------- - search_query : str - - keywords : str - - title : str - - company_url : str - - task_instructions : typing.Optional[str] - - enable_html : typing.Optional[bool] + example_id : typing.Optional[str] - selected_model : typing.Optional[PostV3SeoSummaryAsyncFormRequestSelectedModel] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - max_search_urls : typing.Optional[int] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - enable_crosslinks : typing.Optional[bool] + input_prompt : typing.Optional[str] - seed : typing.Optional[int] + selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] avoid_repetition : typing.Optional[bool] @@ -14684,17 +5883,7 @@ async def post_v3seo_summary_async_form( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[PostV3SeoSummaryAsyncFormRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -14703,7 +5892,7 
@@ async def post_v3seo_summary_async_form( Returns ------- - SeoSummaryPageStatusResponse + typing.Optional[CompareLlmPageOutput] Successful Response Examples @@ -14718,96 +5907,145 @@ async def post_v3seo_summary_async_form( async def main() -> None: - await client.post_v3seo_summary_async_form( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async/form", - method="POST", - data={ - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, + await client.llm() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/CompareLLM/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "selected_models": selected_models, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, "max_tokens": max_tokens, "sampling_temperature": sampling_temperature, "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - SeoSummaryPageStatusResponse, + _parsed_response = typing.cast( + CompareLlmPageStatusResponse, parse_obj_as( - type_=SeoSummaryPageStatusResponse, # type: ignore + type_=CompareLlmPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return 
_parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3smart_gpt_async_form( + async def rag( self, *, - input_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - cot_prompt: typing.Optional[str] = None, - reflexion_prompt: typing.Optional[str] = None, - dera_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3SmartGptAsyncFormRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3SmartGptAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, + search_query: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, + 
documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> SmartGptPageStatusResponse: + ) -> typing.Optional[DocSearchPageOutput]: """ Parameters ---------- - input_prompt : str + search_query : str + + example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - cot_prompt : typing.Optional[str] + keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] + + documents : typing.Optional[typing.Sequence[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse 
embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - reflexion_prompt : typing.Optional[str] - dera_prompt : typing.Optional[str] + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSearchPageRequestSelectedModel] - selected_model : typing.Optional[PostV3SmartGptAsyncFormRequestSelectedModel] + citation_style : typing.Optional[DocSearchPageRequestCitationStyle] avoid_repetition : typing.Optional[bool] @@ -14819,7 +6057,7 @@ async def post_v3smart_gpt_async_form( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[PostV3SmartGptAsyncFormRequestResponseFormatType] + response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -14828,7 +6066,7 @@ async def post_v3smart_gpt_async_form( Returns ------- - SmartGptPageStatusResponse + typing.Optional[DocSearchPageOutput] Successful Response Examples @@ -14843,24 +6081,35 @@ async def post_v3smart_gpt_async_form( async def main() -> None: - await client.post_v3smart_gpt_async_form( - input_prompt="input_prompt", + await client.rag( + search_query="search_query", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async/form", + "v3/doc-search/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "input_prompt": input_prompt, - "cot_prompt": cot_prompt, - "reflexion_prompt": reflexion_prompt, - "dera_prompt": dera_prompt, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": 
embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, "selected_model": selected_model, + "citation_style": citation_style, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, @@ -14869,54 +6118,99 @@ async def main() -> None: "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - SmartGptPageStatusResponse, + _parsed_response = typing.cast( + DocSearchPageStatusResponse, parse_obj_as( - type_=SmartGptPageStatusResponse, # type: ignore + type_=DocSearchPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3social_lookup_email_async_form( + async def doc_summary( self, *, - email_address: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_prompt: typing.Optional[str] = None, - selected_model: 
typing.Optional[PostV3SocialLookupEmailAsyncFormRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3SocialLookupEmailAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + merge_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, + selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> SocialLookupEmailPageStatusResponse: + ) -> typing.Optional[DocSummaryPageOutput]: """ Parameters ---------- - email_address : str + documents : typing.Sequence[str] + + example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as 
arguments - input_prompt : typing.Optional[str] + task_instructions : typing.Optional[str] + + merge_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] + + chain_type : typing.Optional[typing.Literal["map_reduce"]] - selected_model : typing.Optional[PostV3SocialLookupEmailAsyncFormRequestSelectedModel] + selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] avoid_repetition : typing.Optional[bool] @@ -14928,7 +6222,7 @@ async def post_v3social_lookup_email_async_form( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[PostV3SocialLookupEmailAsyncFormRequestResponseFormatType] + response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -14937,7 +6231,7 @@ async def post_v3social_lookup_email_async_form( Returns ------- - SocialLookupEmailPageStatusResponse + typing.Optional[DocSummaryPageOutput] Successful Response Examples @@ -14952,22 +6246,29 @@ async def post_v3social_lookup_email_async_form( async def main() -> None: - await client.post_v3social_lookup_email_async_form( - email_address="email_address", + await client.doc_summary( + documents=["documents"], ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async/form", + "v3/doc-summary/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "email_address": email_address, - "input_prompt": input_prompt, + "documents": documents, + "task_instructions": task_instructions, + "merge_instructions": merge_instructions, "selected_model": selected_model, + "chain_type": chain_type, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": 
quality, @@ -14976,62 +6277,102 @@ async def main() -> None: "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - SocialLookupEmailPageStatusResponse, + _parsed_response = typing.cast( + DocSummaryPageStatusResponse, parse_obj_as( - type_=SocialLookupEmailPageStatusResponse, # type: ignore + type_=DocSummaryPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3text_to_speech_async_form( + async def lipsync_tts( self, *, text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - tts_provider: typing.Optional[PostV3TextToSpeechAsyncFormRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - 
bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel] = None, - settings: typing.Optional[RunSettings] = None, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, 
+ input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, + sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> TextToSpeechPageStatusResponse: + ) -> typing.Optional[LipsyncTtsPageOutput]: """ Parameters ---------- text_prompt : str - functions : typing.Optional[typing.List[RecipeFunction]] + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[PostV3TextToSpeechAsyncFormRequestTtsProvider] + tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -15064,9 +6405,23 @@ async def post_v3text_to_speech_async_form( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] + + input_face : typing.Optional[str] + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[SadTalkerSettings] - openai_tts_model : typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel] + selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -15075,7 +6430,7 @@ async def 
post_v3text_to_speech_async_form( Returns ------- - TextToSpeechPageStatusResponse + typing.Optional[LipsyncTtsPageOutput] Successful Response Examples @@ -15090,7 +6445,7 @@ async def post_v3text_to_speech_async_form( async def main() -> None: - await client.post_v3text_to_speech_async_form( + await client.lipsync_tts( text_prompt="text_prompt", ) @@ -15098,9 +6453,12 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async/form", + "v3/LipsyncTTS/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "text_prompt": text_prompt, @@ -15122,250 +6480,139 @@ async def main() -> None: "azure_voice_name": azure_voice_name, "openai_voice_name": openai_voice_name, "openai_tts_model": openai_tts_model, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - TextToSpeechPageStatusResponse, - parse_obj_as( - type_=TextToSpeechPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def post_v3art_qr_code_async_form( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - qr_code_data: typing.Optional[str] = None, - qr_code_input_image: typing.Optional[str] = None, - qr_code_vcard: typing.Optional[Vcard] = None, - qr_code_file: typing.Optional[str] = None, - use_url_shortener: typing.Optional[bool] = None, - negative_prompt: typing.Optional[str] = None, - image_prompt: typing.Optional[str] = None, - image_prompt_controlnet_models: typing.Optional[ - 
typing.List[PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem] - ] = None, - image_prompt_strength: typing.Optional[float] = None, - image_prompt_scale: typing.Optional[float] = None, - image_prompt_pos_x: typing.Optional[float] = None, - image_prompt_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[PostV3ArtQrCodeAsyncFormRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[ - typing.List[PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem] - ] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - scheduler: typing.Optional[PostV3ArtQrCodeAsyncFormRequestScheduler] = None, - seed: typing.Optional[int] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> QrCodeGeneratorPageStatusResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - qr_code_data : typing.Optional[str] - - qr_code_input_image : typing.Optional[str] - - qr_code_vcard : typing.Optional[Vcard] - - qr_code_file : typing.Optional[str] - - use_url_shortener : typing.Optional[bool] - - negative_prompt : typing.Optional[str] - - image_prompt : typing.Optional[str] - - image_prompt_controlnet_models : typing.Optional[typing.List[PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem]] - - image_prompt_strength : typing.Optional[float] - - image_prompt_scale : 
typing.Optional[float] - - image_prompt_pos_x : typing.Optional[float] - - image_prompt_pos_y : typing.Optional[float] - - selected_model : typing.Optional[PostV3ArtQrCodeAsyncFormRequestSelectedModel] - - selected_controlnet_model : typing.Optional[typing.List[PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem]] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.List[float]] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - scheduler : typing.Optional[PostV3ArtQrCodeAsyncFormRequestScheduler] - - seed : typing.Optional[int] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - QrCodeGeneratorPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.post_v3art_qr_code_async_form( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_input_image": qr_code_input_image, - "qr_code_vcard": qr_code_vcard, - "qr_code_file": qr_code_file, - "use_url_shortener": use_url_shortener, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": 
image_prompt_pos_y, + "input_face": input_face, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "num_outputs": num_outputs, - "quality": quality, - "scheduler": scheduler, - "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - QrCodeGeneratorPageStatusResponse, + _parsed_response = typing.cast( + LipsyncTtsPageStatusResponse, parse_obj_as( - type_=QrCodeGeneratorPageStatusResponse, # type: ignore + type_=LipsyncTtsPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def 
post_v3asr_async_form( + async def text_to_speech( self, *, - documents: typing.List[str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[PostV3AsrAsyncFormRequestSelectedModel] = None, - language: typing.Optional[str] = None, - translation_model: typing.Optional[PostV3AsrAsyncFormRequestTranslationModel] = None, - output_format: typing.Optional[PostV3AsrAsyncFormRequestOutputFormat] = None, - google_translate_target: typing.Optional[str] = None, - translation_source: typing.Optional[str] = None, - translation_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: 
typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> AsrPageStatusResponse: + ) -> typing.Optional[TextToSpeechPageOutput]: """ Parameters ---------- - documents : typing.List[str] + text_prompt : str - functions : typing.Optional[typing.List[RecipeFunction]] + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[PostV3AsrAsyncFormRequestSelectedModel] + tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] - language : typing.Optional[str] + uberduck_speaking_rate : typing.Optional[float] - translation_model : typing.Optional[PostV3AsrAsyncFormRequestTranslationModel] + google_voice_name : typing.Optional[str] - output_format : typing.Optional[PostV3AsrAsyncFormRequestOutputFormat] + google_speaking_rate : typing.Optional[float] - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. + google_pitch : typing.Optional[float] - translation_source : typing.Optional[str] + bark_history_prompt : typing.Optional[str] - translation_target : typing.Optional[str] + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
+ elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] settings : typing.Optional[RunSettings] @@ -15374,7 +6621,7 @@ async def post_v3asr_async_form( Returns ------- - AsrPageStatusResponse + typing.Optional[TextToSpeechPageOutput] Successful Response Examples @@ -15389,105 +6636,139 @@ async def post_v3asr_async_form( async def main() -> None: - await client.post_v3asr_async_form( - documents=["documents"], + await client.text_to_speech( + text_prompt="text_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/asr/async/form", + "v3/TextToSpeech/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "documents": documents, - "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + 
"elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - AsrPageStatusResponse, + _parsed_response = typing.cast( + TextToSpeechPageStatusResponse, parse_obj_as( - type_=AsrPageStatusResponse, # type: ignore + type_=TextToSpeechPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3bulk_eval_async_form( + async def speech_recognition( self, *, - documents: typing.List[str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - eval_prompts: typing.Optional[typing.List[EvalPrompt]] = None, - 
agg_functions: typing.Optional[typing.List[AggFunction]] = None, - selected_model: typing.Optional[PostV3BulkEvalAsyncFormRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3BulkEvalAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, + language: typing.Optional[str] = OMIT, + translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, + output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + translation_source: typing.Optional[str] = OMIT, + translation_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> BulkEvalPageStatusResponse: + ) -> typing.Optional[AsrPageOutput]: """ Parameters ---------- - documents : typing.List[str] - - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. 
+ documents : typing.Sequence[str] + example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - eval_prompts : typing.Optional[typing.List[EvalPrompt]] - - Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. - _The `columns` dictionary can be used to reference the spreadsheet columns._ - - - agg_functions : typing.Optional[typing.List[AggFunction]] - - Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - + selected_model : typing.Optional[AsrPageRequestSelectedModel] - selected_model : typing.Optional[PostV3BulkEvalAsyncFormRequestSelectedModel] + language : typing.Optional[str] - avoid_repetition : typing.Optional[bool] + translation_model : typing.Optional[AsrPageRequestTranslationModel] - num_outputs : typing.Optional[int] + output_format : typing.Optional[AsrPageRequestOutputFormat] - quality : typing.Optional[float] + google_translate_target : typing.Optional[str] + use `translation_model` & `translation_target` instead. - max_tokens : typing.Optional[int] + translation_source : typing.Optional[str] - sampling_temperature : typing.Optional[float] + translation_target : typing.Optional[str] - response_format_type : typing.Optional[PostV3BulkEvalAsyncFormRequestResponseFormatType] + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. 
Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). settings : typing.Optional[RunSettings] @@ -15496,7 +6777,7 @@ async def post_v3bulk_eval_async_form( Returns ------- - BulkEvalPageStatusResponse + typing.Optional[AsrPageOutput] Successful Response Examples @@ -15511,7 +6792,7 @@ async def post_v3bulk_eval_async_form( async def main() -> None: - await client.post_v3bulk_eval_async_form( + await client.speech_recognition( documents=["documents"], ) @@ -15519,89 +6800,118 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async/form", + "v3/asr/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "documents": documents, - "eval_prompts": eval_prompts, - "agg_functions": agg_functions, "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, + "language": language, + "translation_model": translation_model, + "output_format": output_format, + "google_translate_target": google_translate_target, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - BulkEvalPageStatusResponse, + _parsed_response = typing.cast( + AsrPageStatusResponse, parse_obj_as( - type_=BulkEvalPageStatusResponse, # type: ignore + type_=AsrPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + 
parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3bulk_runner_async_form( + async def text_to_music( self, *, - documents: typing.List[str], - run_urls: typing.List[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - eval_urls: typing.Optional[typing.List[str]] = None, - settings: typing.Optional[RunSettings] = None, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + duration_sec: typing.Optional[float] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> BulkRunnerPageStatusResponse: + ) -> 
typing.Optional[Text2AudioPageOutput]: """ Parameters ---------- - documents : typing.List[str] - - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - - run_urls : typing.List[str] - - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - + text_prompt : str - input_columns : typing.Dict[str, str] + example_id : typing.Optional[str] - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + functions : typing.Optional[typing.Sequence[RecipeFunction]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - output_columns : typing.Dict[str, str] + negative_prompt : typing.Optional[str] - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. + duration_sec : typing.Optional[float] + num_outputs : typing.Optional[int] - functions : typing.Optional[typing.List[RecipeFunction]] + quality : typing.Optional[int] - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments + guidance_scale : typing.Optional[float] - eval_urls : typing.Optional[typing.List[str]] + seed : typing.Optional[int] - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. 
+ sd2upscaling : typing.Optional[bool] + selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] settings : typing.Optional[RunSettings] @@ -15610,7 +6920,7 @@ async def post_v3bulk_runner_async_form( Returns ------- - BulkRunnerPageStatusResponse + typing.Optional[Text2AudioPageOutput] Successful Response Examples @@ -15625,82 +6935,116 @@ async def post_v3bulk_runner_async_form( async def main() -> None: - await client.post_v3bulk_runner_async_form( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, + await client.text_to_music( + text_prompt="text_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async/form", + "v3/text2audio/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "documents": documents, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "duration_sec": duration_sec, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - BulkRunnerPageStatusResponse, + _parsed_response = typing.cast( + Text2AudioPageStatusResponse, parse_obj_as( - type_=BulkRunnerPageStatusResponse, # type: ignore + type_=Text2AudioPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + 
) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3compare_ai_upscalers_async_form( + async def translate( self, *, - scale: int, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_image: typing.Optional[str] = None, - input_video: typing.Optional[str] = None, - selected_models: typing.Optional[ - typing.List[PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem] - ] = None, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, - settings: typing.Optional[RunSettings] = None, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + texts: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, + translation_source: typing.Optional[str] = OMIT, + translation_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> CompareUpscalerPageStatusResponse: + ) -> typing.Optional[TranslationPageOutput]: """ Parameters ---------- - scale : int - The final upsampling scale of the image + example_id : typing.Optional[str] - 
functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - input_image : typing.Optional[str] - Input Image + texts : typing.Optional[typing.Sequence[str]] - input_video : typing.Optional[str] - Input Video + selected_model : typing.Optional[TranslationPageRequestSelectedModel] - selected_models : typing.Optional[typing.List[PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem]] + translation_source : typing.Optional[str] - selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
settings : typing.Optional[RunSettings] @@ -15709,7 +7053,7 @@ async def post_v3compare_ai_upscalers_async_form( Returns ------- - CompareUpscalerPageStatusResponse + typing.Optional[TranslationPageOutput] Successful Response Examples @@ -15724,100 +7068,135 @@ async def post_v3compare_ai_upscalers_async_form( async def main() -> None: - await client.post_v3compare_ai_upscalers_async_form( - scale=1, - ) + await client.translate() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async/form", + "v3/translate/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "input_image": input_image, - "input_video": input_video, - "scale": scale, - "selected_models": selected_models, - "selected_bg_model": selected_bg_model, + "texts": texts, + "selected_model": selected_model, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - CompareUpscalerPageStatusResponse, + _parsed_response = typing.cast( + TranslationPageStatusResponse, parse_obj_as( - type_=CompareUpscalerPageStatusResponse, # type: ignore + type_=TranslationPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + 
typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3doc_extract_async_form( + async def remix_image( self, *, - documents: typing.List[str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - sheet_url: typing.Optional[str] = None, - selected_asr_model: typing.Optional[PostV3DocExtractAsyncFormRequestSelectedAsrModel] = None, - google_translate_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3DocExtractAsyncFormRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3DocExtractAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, + input_image: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + text_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, + selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: 
typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> DocExtractPageStatusResponse: + ) -> typing.Optional[Img2ImgPageOutput]: """ Parameters ---------- - documents : typing.List[str] + input_image : str - functions : typing.Optional[typing.List[RecipeFunction]] + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - sheet_url : typing.Optional[str] + text_prompt : typing.Optional[str] - selected_asr_model : typing.Optional[PostV3DocExtractAsyncFormRequestSelectedAsrModel] + selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] - google_translate_target : typing.Optional[str] + selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
+ negative_prompt : typing.Optional[str] - task_instructions : typing.Optional[str] + num_outputs : typing.Optional[int] - selected_model : typing.Optional[PostV3DocExtractAsyncFormRequestSelectedModel] + quality : typing.Optional[int] - avoid_repetition : typing.Optional[bool] + output_width : typing.Optional[int] - num_outputs : typing.Optional[int] + output_height : typing.Optional[int] - quality : typing.Optional[float] + guidance_scale : typing.Optional[float] - max_tokens : typing.Optional[int] + prompt_strength : typing.Optional[float] - sampling_temperature : typing.Optional[float] + controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] + + seed : typing.Optional[int] - response_format_type : typing.Optional[PostV3DocExtractAsyncFormRequestResponseFormatType] + image_guidance_scale : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -15826,7 +7205,7 @@ async def post_v3doc_extract_async_form( Returns ------- - DocExtractPageStatusResponse + typing.Optional[Img2ImgPageOutput] Successful Response Examples @@ -15841,128 +7220,149 @@ async def post_v3doc_extract_async_form( async def main() -> None: - await client.post_v3doc_extract_async_form( - documents=["documents"], + await client.remix_image( + input_image="input_image", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-extract/async/form", + "v3/Img2Img/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "documents": documents, - "sheet_url": sheet_url, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "glossary_document": glossary_document, - "task_instructions": task_instructions, + "input_image": input_image, + "text_prompt": text_prompt, "selected_model": selected_model, - "avoid_repetition": avoid_repetition, + "selected_controlnet_model": selected_controlnet_model, + "negative_prompt": 
negative_prompt, "num_outputs": num_outputs, "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "seed": seed, + "image_guidance_scale": image_guidance_scale, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - DocExtractPageStatusResponse, + _parsed_response = typing.cast( + Img2ImgPageStatusResponse, parse_obj_as( - type_=DocExtractPageStatusResponse, # type: ignore + type_=Img2ImgPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3doc_search_async_form( + async def text_to_image( self, *, - search_query: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - keyword_query: 
typing.Optional[PostV3DocSearchAsyncFormRequestKeywordQuery] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - doc_extract_url: typing.Optional[str] = None, - embedding_model: typing.Optional[PostV3DocSearchAsyncFormRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3DocSearchAsyncFormRequestSelectedModel] = None, - citation_style: typing.Optional[PostV3DocSearchAsyncFormRequestCitationStyle] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3DocSearchAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + dall_e3quality: typing.Optional[str] = OMIT, + dall_e3style: typing.Optional[str] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, + edit_instruction: typing.Optional[str] = 
OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> DocSearchPageStatusResponse: + ) -> typing.Optional[CompareText2ImgPageOutput]: """ Parameters ---------- - search_query : str + text_prompt : str + + example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - keyword_query : typing.Optional[PostV3DocSearchAsyncFormRequestKeywordQuery] - - documents : typing.Optional[typing.List[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[PostV3DocSearchAsyncFormRequestEmbeddingModel] + negative_prompt : typing.Optional[str] - dense_weight : typing.Optional[float] + output_width : typing.Optional[int] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ output_height : typing.Optional[int] + num_outputs : typing.Optional[int] - task_instructions : typing.Optional[str] + quality : typing.Optional[int] - query_instructions : typing.Optional[str] + dall_e3quality : typing.Optional[str] - selected_model : typing.Optional[PostV3DocSearchAsyncFormRequestSelectedModel] + dall_e3style : typing.Optional[str] - citation_style : typing.Optional[PostV3DocSearchAsyncFormRequestCitationStyle] + guidance_scale : typing.Optional[float] - avoid_repetition : typing.Optional[bool] + seed : typing.Optional[int] - num_outputs : typing.Optional[int] + sd2upscaling : typing.Optional[bool] - quality : typing.Optional[float] + selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - max_tokens : typing.Optional[int] + scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] - sampling_temperature : typing.Optional[float] + edit_instruction : typing.Optional[str] - response_format_type : typing.Optional[PostV3DocSearchAsyncFormRequestResponseFormatType] + image_guidance_scale : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -15971,7 +7371,7 @@ async def post_v3doc_search_async_form( Returns ------- - DocSearchPageStatusResponse + typing.Optional[CompareText2ImgPageOutput] Successful Response Examples @@ -15986,112 +7386,150 @@ async def post_v3doc_search_async_form( async def main() -> None: - await client.post_v3doc_search_async_form( - search_query="search_query", + await client.text_to_image( + text_prompt="text_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-search/async/form", + "v3/CompareText2Img/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - 
"scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "output_width": output_width, + "output_height": output_height, "num_outputs": num_outputs, "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, + "dall_e_3_quality": dall_e3quality, + "dall_e_3_style": dall_e3style, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "scheduler": scheduler, + "edit_instruction": edit_instruction, + "image_guidance_scale": image_guidance_scale, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - DocSearchPageStatusResponse, + _parsed_response = typing.cast( + CompareText2ImgPageStatusResponse, parse_obj_as( - type_=DocSearchPageStatusResponse, # type: ignore + type_=CompareText2ImgPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + 
object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3doc_summary_async_form( + async def product_image( self, *, - documents: typing.List[str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - task_instructions: typing.Optional[str] = None, - merge_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedModel] = None, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, - selected_asr_model: typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedAsrModel] = None, - google_translate_target: typing.Optional[str] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3DocSummaryAsyncFormRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, + input_image: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + mask_threshold: typing.Optional[float] = OMIT, + selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: 
typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> DocSummaryPageStatusResponse: + ) -> typing.Optional[ObjectInpaintingPageOutput]: """ Parameters ---------- - documents : typing.List[str] + input_image : str + + text_prompt : str - functions : typing.Optional[typing.List[RecipeFunction]] + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] + obj_scale : typing.Optional[float] - selected_model : typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedModel] + obj_pos_x : typing.Optional[float] - chain_type : typing.Optional[typing.Literal["map_reduce"]] + obj_pos_y : typing.Optional[float] - selected_asr_model : typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedAsrModel] + mask_threshold : typing.Optional[float] - google_translate_target : typing.Optional[str] + selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] - avoid_repetition : typing.Optional[bool] + negative_prompt : typing.Optional[str] num_outputs : typing.Optional[int] - quality : typing.Optional[float] + quality : typing.Optional[int] - max_tokens : typing.Optional[int] + output_width : typing.Optional[int] - sampling_temperature : typing.Optional[float] + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] - response_format_type : typing.Optional[PostV3DocSummaryAsyncFormRequestResponseFormatType] + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -16100,7 +7538,7 @@ async def 
post_v3doc_summary_async_form( Returns ------- - DocSummaryPageStatusResponse + typing.Optional[ObjectInpaintingPageOutput] Successful Response Examples @@ -16115,147 +7553,148 @@ async def post_v3doc_summary_async_form( async def main() -> None: - await client.post_v3doc_summary_async_form( - documents=["documents"], + await client.product_image( + input_image="input_image", + text_prompt="text_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-summary/async/form", + "v3/ObjectInpainting/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "documents": documents, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, + "input_image": input_image, + "text_prompt": text_prompt, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "mask_threshold": mask_threshold, "selected_model": selected_model, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "avoid_repetition": avoid_repetition, + "negative_prompt": negative_prompt, "num_outputs": num_outputs, "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "sd_2_upscaling": sd2upscaling, + "seed": seed, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - DocSummaryPageStatusResponse, + _parsed_response = typing.cast( + ObjectInpaintingPageStatusResponse, parse_obj_as( - type_=DocSummaryPageStatusResponse, # type: ignore + type_=ObjectInpaintingPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code 
== 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3embeddings_async_form( + async def portrait( self, *, - texts: typing.List[str], - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[PostV3EmbeddingsAsyncFormRequestSelectedModel] = None, - settings: typing.Optional[RunSettings] = None, + input_image: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] 
= OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> EmbeddingsPageStatusResponse: + ) -> typing.Optional[FaceInpaintingPageOutput]: """ Parameters ---------- - texts : typing.List[str] + input_image : str - functions : typing.Optional[typing.List[RecipeFunction]] + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[PostV3EmbeddingsAsyncFormRequestSelectedModel] - - settings : typing.Optional[RunSettings] + face_scale : typing.Optional[float] - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + face_pos_x : typing.Optional[float] - Returns - ------- - EmbeddingsPageStatusResponse - Successful Response + face_pos_y : typing.Optional[float] - Examples - -------- - import asyncio + selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] - from gooey import AsyncGooey + negative_prompt : typing.Optional[str] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + num_outputs : typing.Optional[int] + quality : typing.Optional[int] - async def main() -> None: - await client.post_v3embeddings_async_form( - texts=["texts"], - ) + upscale_factor : typing.Optional[float] + output_width : typing.Optional[int] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/embeddings/async/form", - method="POST", - data={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "settings": settings, - }, - files={}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EmbeddingsPageStatusResponse, - parse_obj_as( - type_=EmbeddingsPageStatusResponse, # 
type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + output_height : typing.Optional[int] - async def post_v3functions_async_form( - self, - *, - code: typing.Optional[str] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> FunctionsPageStatusResponse: - """ - Parameters - ---------- - code : typing.Optional[str] - The JS code to be executed. + guidance_scale : typing.Optional[float] - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used in the code + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -16264,7 +7703,7 @@ async def post_v3functions_async_form( Returns ------- - FunctionsPageStatusResponse + typing.Optional[FaceInpaintingPageOutput] Successful Response Examples @@ -16279,121 +7718,174 @@ async def post_v3functions_async_form( async def main() -> None: - await client.post_v3functions_async_form() + await client.portrait( + input_image="input_image", + text_prompt="tony stark from the iron man", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/functions/async/form", + "v3/FaceInpainting/async", method="POST", - data={ - "code": code, + params={ + "example_id": example_id, + }, + json={ + "functions": functions, "variables": variables, + "input_image": input_image, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": 
output_height, + "guidance_scale": guidance_scale, + "seed": seed, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - FunctionsPageStatusResponse, + _parsed_response = typing.cast( + FaceInpaintingPageStatusResponse, parse_obj_as( - type_=FunctionsPageStatusResponse, # type: ignore + type_=FaceInpaintingPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3google_gpt_async_form( + async def image_from_email( self, *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3GoogleGptAsyncFormRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - 
embedding_model: typing.Optional[PostV3GoogleGptAsyncFormRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3GoogleGptAsyncFormRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + email_address: typing.Optional[str] = OMIT, + twitter_handle: typing.Optional[str] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + should_send_email: typing.Optional[bool] = OMIT, + email_from: typing.Optional[str] = OMIT, + email_cc: typing.Optional[str] = OMIT, + email_bcc: typing.Optional[str] = OMIT, + email_subject: typing.Optional[str] = OMIT, + email_body: typing.Optional[str] = OMIT, + email_body_enable_html: typing.Optional[bool] = OMIT, + fallback_email_body: typing.Optional[str] = OMIT, + seed: 
typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> GoogleGptPageStatusResponse: + ) -> typing.Optional[EmailFaceInpaintingPageOutput]: """ Parameters ---------- - search_query : str + text_prompt : str - site_filter : str + example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - task_instructions : typing.Optional[str] + email_address : typing.Optional[str] - query_instructions : typing.Optional[str] + twitter_handle : typing.Optional[str] - selected_model : typing.Optional[PostV3GoogleGptAsyncFormRequestSelectedModel] + face_scale : typing.Optional[float] - max_search_urls : typing.Optional[int] + face_pos_x : typing.Optional[float] - max_references : typing.Optional[int] + face_pos_y : typing.Optional[float] - max_context_words : typing.Optional[int] + selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] - scroll_jump : typing.Optional[int] + negative_prompt : typing.Optional[str] - embedding_model : typing.Optional[PostV3GoogleGptAsyncFormRequestEmbeddingModel] + num_outputs : typing.Optional[int] - dense_weight : typing.Optional[float] + quality : typing.Optional[int] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ upscale_factor : typing.Optional[float] + output_width : typing.Optional[int] - avoid_repetition : typing.Optional[bool] + output_height : typing.Optional[int] - num_outputs : typing.Optional[int] + guidance_scale : typing.Optional[float] - quality : typing.Optional[float] + should_send_email : typing.Optional[bool] - max_tokens : typing.Optional[int] + email_from : typing.Optional[str] - sampling_temperature : typing.Optional[float] + email_cc : typing.Optional[str] - response_format_type : typing.Optional[PostV3GoogleGptAsyncFormRequestResponseFormatType] + email_bcc : typing.Optional[str] - serp_search_location : typing.Optional[SerpSearchLocation] + email_subject : typing.Optional[str] - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead + email_body : typing.Optional[str] - serp_search_type : typing.Optional[SerpSearchType] + email_body_enable_html : typing.Optional[bool] - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + fallback_email_body : typing.Optional[str] + + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -16402,7 +7894,7 @@ async def post_v3google_gpt_async_form( Returns ------- - GoogleGptPageStatusResponse + typing.Optional[EmailFaceInpaintingPageOutput] Successful Response Examples @@ -16417,151 +7909,154 @@ async def post_v3google_gpt_async_form( async def main() -> None: - await client.post_v3google_gpt_async_form( - search_query="search_query", - site_filter="site_filter", + await client.image_from_email( + email_address="sean@dara.network", + text_prompt="winter's day in paris", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/google-gpt/async/form", + "v3/EmailFaceInpainting/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - 
"task_instructions": task_instructions, - "query_instructions": query_instructions, + "email_address": email_address, + "twitter_handle": twitter_handle, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, + "negative_prompt": negative_prompt, "num_outputs": num_outputs, "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "should_send_email": should_send_email, + "email_from": email_from, + "email_cc": email_cc, + "email_bcc": email_bcc, + "email_subject": email_subject, + "email_body": email_body, + "email_body_enable_html": email_body_enable_html, + "fallback_email_body": fallback_email_body, + "seed": seed, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - GoogleGptPageStatusResponse, + _parsed_response = typing.cast( + EmailFaceInpaintingPageStatusResponse, parse_obj_as( - type_=GoogleGptPageStatusResponse, # type: ignore + type_=EmailFaceInpaintingPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], 
# type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3related_qna_maker_doc_async_form( + async def image_from_web_search( self, *, search_query: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - keyword_query: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - doc_extract_url: typing.Optional[str] = None, - embedding_model: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel] = None, - citation_style: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: 
typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> RelatedQnADocPageStatusResponse: + ) -> typing.Optional[GoogleImageGenPageOutput]: """ Parameters ---------- search_query : str - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery] - - documents : typing.Optional[typing.List[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : 
typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel] - - dense_weight : typing.Optional[float] + text_prompt : str - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + example_id : typing.Optional[str] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - task_instructions : typing.Optional[str] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - query_instructions : typing.Optional[str] + serp_search_location : typing.Optional[SerpSearchLocation] - selected_model : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel] + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead - citation_style : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle] + selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] - avoid_repetition : typing.Optional[bool] + negative_prompt : typing.Optional[str] num_outputs : typing.Optional[int] - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] + quality : typing.Optional[int] - response_format_type : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType] + guidance_scale : typing.Optional[float] - serp_search_location : typing.Optional[SerpSearchLocation] + prompt_strength : typing.Optional[float] - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead + sd2upscaling : typing.Optional[bool] - serp_search_type : typing.Optional[SerpSearchType] + seed : typing.Optional[int] - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + 
image_guidance_scale : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -16570,7 +8065,7 @@ async def post_v3related_qna_maker_doc_async_form( Returns ------- - RelatedQnADocPageStatusResponse + typing.Optional[GoogleImageGenPageOutput] Successful Response Examples @@ -16585,146 +8080,128 @@ async def post_v3related_qna_maker_doc_async_form( async def main() -> None: - await client.post_v3related_qna_maker_doc_async_form( + await client.image_from_web_search( search_query="search_query", + text_prompt="text_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async/form", + "v3/GoogleImageGen/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, + "text_prompt": text_prompt, "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, + "negative_prompt": negative_prompt, "num_outputs": num_outputs, "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "image_guidance_scale": 
image_guidance_scale, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - RelatedQnADocPageStatusResponse, + _parsed_response = typing.cast( + GoogleImageGenPageStatusResponse, parse_obj_as( - type_=RelatedQnADocPageStatusResponse, # type: ignore + type_=GoogleImageGenPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3related_qna_maker_async_form( + async def remove_background( self, *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: 
typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, + input_image: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, + mask_threshold: typing.Optional[float] = OMIT, + rect_persepective_transform: typing.Optional[bool] = OMIT, + reflection_opacity: typing.Optional[float] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> RelatedQnAPageStatusResponse: + ) -> typing.Optional[ImageSegmentationPageOutput]: """ Parameters ---------- - search_query : str - - site_filter : str - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : 
typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - + input_image : str - avoid_repetition : typing.Optional[bool] + example_id : typing.Optional[str] - num_outputs : typing.Optional[int] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - quality : typing.Optional[float] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - max_tokens : typing.Optional[int] + selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] - sampling_temperature : typing.Optional[float] + mask_threshold : typing.Optional[float] - response_format_type : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType] + rect_persepective_transform : typing.Optional[bool] - serp_search_location : typing.Optional[SerpSearchLocation] + reflection_opacity : typing.Optional[float] - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead + obj_scale : typing.Optional[float] - serp_search_type : typing.Optional[SerpSearchType] + obj_pos_x : typing.Optional[float] - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + obj_pos_y : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -16733,7 +8210,7 @@ async def post_v3related_qna_maker_async_form( Returns ------- - RelatedQnAPageStatusResponse + 
typing.Optional[ImageSegmentationPageOutput] Successful Response Examples @@ -16748,103 +8225,116 @@ async def post_v3related_qna_maker_async_form( async def main() -> None: - await client.post_v3related_qna_maker_async_form( - search_query="search_query", - site_filter="site_filter", + await client.remove_background( + input_image="input_image", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async/form", + "v3/ImageSegmentation/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, + "input_image": input_image, "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, + "mask_threshold": mask_threshold, + "rect_persepective_transform": rect_persepective_transform, + "reflection_opacity": reflection_opacity, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - RelatedQnAPageStatusResponse, + _parsed_response = typing.cast( + ImageSegmentationPageStatusResponse, parse_obj_as( - type_=RelatedQnAPageStatusResponse, # type: 
ignore + type_=ImageSegmentationPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3text2audio_async_form( + async def upscale( self, *, - text_prompt: str, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - negative_prompt: typing.Optional[str] = None, - duration_sec: typing.Optional[float] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - seed: typing.Optional[int] = None, - sd2upscaling: typing.Optional[bool] = None, - selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None, - settings: typing.Optional[RunSettings] = None, + scale: int, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_image: typing.Optional[str] = OMIT, + input_video: typing.Optional[str] = OMIT, + selected_models: 
typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, + selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> Text2AudioPageStatusResponse: + ) -> typing.Optional[CompareUpscalerPageOutput]: """ Parameters ---------- - text_prompt : str + scale : int + The final upsampling scale of the image + + example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - negative_prompt : typing.Optional[str] - - duration_sec : typing.Optional[float] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] + input_image : typing.Optional[str] + Input Image - seed : typing.Optional[int] + input_video : typing.Optional[str] + Input Video - sd2upscaling : typing.Optional[bool] + selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] - selected_models : typing.Optional[typing.List[typing.Literal["audio_ldm"]]] + selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] settings : typing.Optional[RunSettings] @@ -16853,7 +8343,7 @@ async def post_v3text2audio_async_form( Returns ------- - Text2AudioPageStatusResponse + typing.Optional[CompareUpscalerPageOutput] Successful Response Examples @@ -16868,80 +8358,101 @@ async def post_v3text2audio_async_form( async def main() -> None: - await client.post_v3text2audio_async_form( - text_prompt="text_prompt", + await client.upscale( + scale=1, ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/text2audio/async/form", + "v3/compare-ai-upscalers/async", method="POST", - data={ + 
params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "duration_sec": duration_sec, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, + "input_image": input_image, + "input_video": input_video, + "scale": scale, "selected_models": selected_models, + "selected_bg_model": selected_bg_model, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - Text2AudioPageStatusResponse, + _parsed_response = typing.cast( + CompareUpscalerPageStatusResponse, parse_obj_as( - type_=Text2AudioPageStatusResponse, # type: ignore + type_=CompareUpscalerPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3translate_async_form( + async def embed( self, *, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - texts: 
typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[PostV3TranslateAsyncFormRequestSelectedModel] = None, - translation_source: typing.Optional[str] = None, - translation_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, + texts: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> TranslationPageStatusResponse: + ) -> typing.Optional[EmbeddingsPageOutput]: """ Parameters ---------- - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - texts : typing.Optional[typing.List[str]] + texts : typing.Sequence[str] - selected_model : typing.Optional[PostV3TranslateAsyncFormRequestSelectedModel] + example_id : typing.Optional[str] - translation_source : typing.Optional[str] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - translation_target : typing.Optional[str] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
+ selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -16950,7 +8461,7 @@ async def post_v3translate_async_form( Returns ------- - TranslationPageStatusResponse + typing.Optional[EmbeddingsPageOutput] Successful Response Examples @@ -16965,143 +8476,121 @@ async def post_v3translate_async_form( async def main() -> None: - await client.post_v3translate_async_form() + await client.embed( + texts=["texts"], + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/translate/async/form", + "v3/embeddings/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "texts": texts, "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - TranslationPageStatusResponse, + _parsed_response = typing.cast( + EmbeddingsPageStatusResponse, parse_obj_as( - type_=TranslationPageStatusResponse, # type: ignore + type_=EmbeddingsPageStatusResponse, # type: ignore object_=_response.json(), ), ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = 
_response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3video_bots_async_form( + async def seo_people_also_ask_doc( self, *, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_prompt: typing.Optional[str] = None, - input_audio: typing.Optional[str] = None, - input_images: typing.Optional[typing.List[str]] = None, - input_documents: typing.Optional[typing.List[str]] = None, - doc_extract_url: typing.Optional[str] = None, - messages: typing.Optional[typing.List[ConversationEntry]] = None, - bot_script: typing.Optional[str] = None, - selected_model: typing.Optional[PostV3VideoBotsAsyncFormRequestSelectedModel] = None, - document_model: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - keyword_instructions: typing.Optional[str] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[PostV3VideoBotsAsyncFormRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[PostV3VideoBotsAsyncFormRequestCitationStyle] = None, - use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[PostV3VideoBotsAsyncFormRequestAsrModel] = None, - asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[PostV3VideoBotsAsyncFormRequestTranslationModel] = None, - user_language: typing.Optional[str] = None, - input_glossary_document: typing.Optional[str] = None, - output_glossary_document: typing.Optional[str] = None, - lipsync_model: 
typing.Optional[PostV3VideoBotsAsyncFormRequestLipsyncModel] = None, - tools: typing.Optional[typing.List[LlmTools]] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PostV3VideoBotsAsyncFormRequestResponseFormatType] = None, - tts_provider: typing.Optional[PostV3VideoBotsAsyncFormRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiTtsModel] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - settings: typing.Optional[RunSettings] = None, + search_query: str, + example_id: typing.Optional[str] = None, + functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> VideoBotsPageStatusResponse: + ) -> typing.Optional[RelatedQnADocPageOutput]: """ Parameters ---------- - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - input_audio : typing.Optional[str] - - input_images : 
typing.Optional[typing.List[str]] - - input_documents : typing.Optional[typing.List[str]] - - doc_extract_url : typing.Optional[str] - Select a workflow to extract text from documents and images. - - messages : typing.Optional[typing.List[ConversationEntry]] - - bot_script : typing.Optional[str] - - selected_model : typing.Optional[PostV3VideoBotsAsyncFormRequestSelectedModel] + search_query : str - document_model : typing.Optional[str] - When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) + example_id : typing.Optional[str] - task_instructions : typing.Optional[str] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - query_instructions : typing.Optional[str] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - keyword_instructions : typing.Optional[str] + keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] - documents : typing.Optional[typing.List[str]] + documents : typing.Optional[typing.Sequence[str]] max_references : typing.Optional[int] @@ -17109,7 +8598,9 @@ async def post_v3video_bots_async_form( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[PostV3VideoBotsAsyncFormRequestEmbeddingModel] + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -17117,35 +8608,13 @@ async def post_v3video_bots_async_form( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- citation_style : typing.Optional[PostV3VideoBotsAsyncFormRequestCitationStyle] - - use_url_shortener : typing.Optional[bool] - - asr_model : typing.Optional[PostV3VideoBotsAsyncFormRequestAsrModel] - Choose a model to transcribe incoming audio messages to text. - - asr_language : typing.Optional[str] - Choose a language to transcribe incoming audio messages to text. - - translation_model : typing.Optional[PostV3VideoBotsAsyncFormRequestTranslationModel] - - user_language : typing.Optional[str] - Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - - input_glossary_document : typing.Optional[str] - - Translation Glossary for User Langauge -> LLM Language (English) - - - output_glossary_document : typing.Optional[str] - - Translation Glossary for LLM Language (English) -> User Langauge + task_instructions : typing.Optional[str] + query_instructions : typing.Optional[str] - lipsync_model : typing.Optional[PostV3VideoBotsAsyncFormRequestLipsyncModel] + selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] - tools : typing.Optional[typing.List[LlmTools]] - Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). 
+ citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] avoid_repetition : typing.Optional[bool] @@ -17157,56 +8626,17 @@ async def post_v3video_bots_async_form( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[PostV3VideoBotsAsyncFormRequestResponseFormatType] - - tts_provider : typing.Optional[PostV3VideoBotsAsyncFormRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] + response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] - face_padding_bottom : typing.Optional[int] + serp_search_location : typing.Optional[SerpSearchLocation] - face_padding_left : typing.Optional[int] + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead - face_padding_right : typing.Optional[int] + serp_search_type : typing.Optional[SerpSearchType] - sadtalker_settings : typing.Optional[SadTalkerSettings] + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` 
instead settings : typing.Optional[RunSettings] @@ -17215,7 +8645,7 @@ async def post_v3video_bots_async_form( Returns ------- - VideoBotsPageStatusResponse + typing.Optional[RelatedQnADocPageOutput] Successful Response Examples @@ -17230,87 +8660,137 @@ async def post_v3video_bots_async_form( async def main() -> None: - await client.post_v3video_bots_async_form() + await client.seo_people_also_ask_doc( + search_query="search_query", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/video-bots/async/form", + "v3/related-qna-maker-doc/async", method="POST", - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "input_prompt": input_prompt, - "input_audio": input_audio, - "input_images": input_images, - "input_documents": input_documents, - "doc_extract_url": doc_extract_url, - "messages": messages, - "bot_script": bot_script, - "selected_model": selected_model, - "document_model": document_model, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "keyword_instructions": keyword_instructions, + "search_query": search_query, + "keyword_query": keyword_query, "documents": documents, "max_references": max_references, "max_context_words": max_context_words, "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, "embedding_model": embedding_model, "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, "citation_style": citation_style, - "use_url_shortener": use_url_shortener, - "asr_model": asr_model, - "asr_language": asr_language, - "translation_model": translation_model, - "user_language": user_language, - "input_glossary_document": input_glossary_document, - "output_glossary_document": output_glossary_document, - "lipsync_model": lipsync_model, - "tools": tools, "avoid_repetition": avoid_repetition, "num_outputs": 
num_outputs, "quality": quality, "max_tokens": max_tokens, "sampling_temperature": sampling_temperature, "response_format_type": response_format_type, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + RelatedQnADocPageStatusResponse, + parse_obj_as( + type_=RelatedQnADocPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise 
UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def health_status_get( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.health_status_get() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "status", + method="GET", + request_options=request_options, + ) try: if 200 <= _response.status_code < 300: return typing.cast( - VideoBotsPageStatusResponse, + typing.Optional[typing.Any], parse_obj_as( - type_=VideoBotsPageStatusResponse, # type: ignore + type_=typing.Optional[typing.Any], # type: ignore object_=_response.json(), ), ) diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py index 4793299..7822ad4 100644 --- a/src/gooey/core/client_wrapper.py +++ b/src/gooey/core/client_wrapper.py @@ -22,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "gooeyai", - "X-Fern-SDK-Version": "0.0.1-beta7", + "X-Fern-SDK-Version": "0.0.1-beta8", } headers["Authorization"] = 
f"Bearer {self._get_api_key()}" return headers diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py index da61628..83278eb 100644 --- a/src/gooey/types/__init__.py +++ b/src/gooey/types/__init__.py @@ -121,127 +121,6 @@ from .object_inpainting_page_output import ObjectInpaintingPageOutput from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse -from .post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item import ( - PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem, -) -from .post_v3art_qr_code_async_form_request_scheduler import PostV3ArtQrCodeAsyncFormRequestScheduler -from .post_v3art_qr_code_async_form_request_selected_controlnet_model_item import ( - PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem, -) -from .post_v3art_qr_code_async_form_request_selected_model import PostV3ArtQrCodeAsyncFormRequestSelectedModel -from .post_v3asr_async_form_request_output_format import PostV3AsrAsyncFormRequestOutputFormat -from .post_v3asr_async_form_request_selected_model import PostV3AsrAsyncFormRequestSelectedModel -from .post_v3asr_async_form_request_translation_model import PostV3AsrAsyncFormRequestTranslationModel -from .post_v3bulk_eval_async_form_request_response_format_type import PostV3BulkEvalAsyncFormRequestResponseFormatType -from .post_v3bulk_eval_async_form_request_selected_model import PostV3BulkEvalAsyncFormRequestSelectedModel -from .post_v3compare_ai_upscalers_async_form_request_selected_models_item import ( - PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem, -) -from .post_v3compare_llm_async_form_request_response_format_type import ( - PostV3CompareLlmAsyncFormRequestResponseFormatType, -) -from .post_v3compare_llm_async_form_request_selected_models_item import ( - PostV3CompareLlmAsyncFormRequestSelectedModelsItem, -) -from 
.post_v3compare_text2img_async_form_request_scheduler import PostV3CompareText2ImgAsyncFormRequestScheduler -from .post_v3compare_text2img_async_form_request_selected_models_item import ( - PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem, -) -from .post_v3deforum_sd_async_form_request_selected_model import PostV3DeforumSdAsyncFormRequestSelectedModel -from .post_v3doc_extract_async_form_request_response_format_type import ( - PostV3DocExtractAsyncFormRequestResponseFormatType, -) -from .post_v3doc_extract_async_form_request_selected_asr_model import PostV3DocExtractAsyncFormRequestSelectedAsrModel -from .post_v3doc_extract_async_form_request_selected_model import PostV3DocExtractAsyncFormRequestSelectedModel -from .post_v3doc_search_async_form_request_citation_style import PostV3DocSearchAsyncFormRequestCitationStyle -from .post_v3doc_search_async_form_request_embedding_model import PostV3DocSearchAsyncFormRequestEmbeddingModel -from .post_v3doc_search_async_form_request_keyword_query import PostV3DocSearchAsyncFormRequestKeywordQuery -from .post_v3doc_search_async_form_request_response_format_type import PostV3DocSearchAsyncFormRequestResponseFormatType -from .post_v3doc_search_async_form_request_selected_model import PostV3DocSearchAsyncFormRequestSelectedModel -from .post_v3doc_summary_async_form_request_response_format_type import ( - PostV3DocSummaryAsyncFormRequestResponseFormatType, -) -from .post_v3doc_summary_async_form_request_selected_asr_model import PostV3DocSummaryAsyncFormRequestSelectedAsrModel -from .post_v3doc_summary_async_form_request_selected_model import PostV3DocSummaryAsyncFormRequestSelectedModel -from .post_v3email_face_inpainting_async_form_request_selected_model import ( - PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel, -) -from .post_v3embeddings_async_form_request_selected_model import PostV3EmbeddingsAsyncFormRequestSelectedModel -from .post_v3face_inpainting_async_form_request_selected_model import 
PostV3FaceInpaintingAsyncFormRequestSelectedModel -from .post_v3google_gpt_async_form_request_embedding_model import PostV3GoogleGptAsyncFormRequestEmbeddingModel -from .post_v3google_gpt_async_form_request_response_format_type import PostV3GoogleGptAsyncFormRequestResponseFormatType -from .post_v3google_gpt_async_form_request_selected_model import PostV3GoogleGptAsyncFormRequestSelectedModel -from .post_v3google_image_gen_async_form_request_selected_model import PostV3GoogleImageGenAsyncFormRequestSelectedModel -from .post_v3image_segmentation_async_form_request_selected_model import ( - PostV3ImageSegmentationAsyncFormRequestSelectedModel, -) -from .post_v3img2img_async_form_request_selected_controlnet_model import ( - PostV3Img2ImgAsyncFormRequestSelectedControlnetModel, -) -from .post_v3img2img_async_form_request_selected_controlnet_model_item import ( - PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem, -) -from .post_v3img2img_async_form_request_selected_model import PostV3Img2ImgAsyncFormRequestSelectedModel -from .post_v3lipsync_async_form_request_selected_model import PostV3LipsyncAsyncFormRequestSelectedModel -from .post_v3lipsync_tts_async_form_request_openai_tts_model import PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel -from .post_v3lipsync_tts_async_form_request_openai_voice_name import PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName -from .post_v3lipsync_tts_async_form_request_selected_model import PostV3LipsyncTtsAsyncFormRequestSelectedModel -from .post_v3lipsync_tts_async_form_request_tts_provider import PostV3LipsyncTtsAsyncFormRequestTtsProvider -from .post_v3object_inpainting_async_form_request_selected_model import ( - PostV3ObjectInpaintingAsyncFormRequestSelectedModel, -) -from .post_v3related_qna_maker_async_form_request_embedding_model import ( - PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel, -) -from .post_v3related_qna_maker_async_form_request_response_format_type import ( - 
PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType, -) -from .post_v3related_qna_maker_async_form_request_selected_model import ( - PostV3RelatedQnaMakerAsyncFormRequestSelectedModel, -) -from .post_v3related_qna_maker_doc_async_form_request_citation_style import ( - PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle, -) -from .post_v3related_qna_maker_doc_async_form_request_embedding_model import ( - PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel, -) -from .post_v3related_qna_maker_doc_async_form_request_keyword_query import ( - PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery, -) -from .post_v3related_qna_maker_doc_async_form_request_response_format_type import ( - PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType, -) -from .post_v3related_qna_maker_doc_async_form_request_selected_model import ( - PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel, -) -from .post_v3seo_summary_async_form_request_response_format_type import ( - PostV3SeoSummaryAsyncFormRequestResponseFormatType, -) -from .post_v3seo_summary_async_form_request_selected_model import PostV3SeoSummaryAsyncFormRequestSelectedModel -from .post_v3smart_gpt_async_form_request_response_format_type import PostV3SmartGptAsyncFormRequestResponseFormatType -from .post_v3smart_gpt_async_form_request_selected_model import PostV3SmartGptAsyncFormRequestSelectedModel -from .post_v3social_lookup_email_async_form_request_response_format_type import ( - PostV3SocialLookupEmailAsyncFormRequestResponseFormatType, -) -from .post_v3social_lookup_email_async_form_request_selected_model import ( - PostV3SocialLookupEmailAsyncFormRequestSelectedModel, -) -from .post_v3text_to_speech_async_form_request_openai_tts_model import PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel -from .post_v3text_to_speech_async_form_request_openai_voice_name import ( - PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName, -) -from .post_v3text_to_speech_async_form_request_tts_provider import 
PostV3TextToSpeechAsyncFormRequestTtsProvider -from .post_v3translate_async_form_request_selected_model import PostV3TranslateAsyncFormRequestSelectedModel -from .post_v3video_bots_async_form_request_asr_model import PostV3VideoBotsAsyncFormRequestAsrModel -from .post_v3video_bots_async_form_request_citation_style import PostV3VideoBotsAsyncFormRequestCitationStyle -from .post_v3video_bots_async_form_request_embedding_model import PostV3VideoBotsAsyncFormRequestEmbeddingModel -from .post_v3video_bots_async_form_request_lipsync_model import PostV3VideoBotsAsyncFormRequestLipsyncModel -from .post_v3video_bots_async_form_request_openai_tts_model import PostV3VideoBotsAsyncFormRequestOpenaiTtsModel -from .post_v3video_bots_async_form_request_openai_voice_name import PostV3VideoBotsAsyncFormRequestOpenaiVoiceName -from .post_v3video_bots_async_form_request_response_format_type import PostV3VideoBotsAsyncFormRequestResponseFormatType -from .post_v3video_bots_async_form_request_selected_model import PostV3VideoBotsAsyncFormRequestSelectedModel -from .post_v3video_bots_async_form_request_translation_model import PostV3VideoBotsAsyncFormRequestTranslationModel -from .post_v3video_bots_async_form_request_tts_provider import PostV3VideoBotsAsyncFormRequestTtsProvider from .prompt_tree_node import PromptTreeNode from .prompt_tree_node_prompt import PromptTreeNodePrompt from .qr_code_generator_page_output import QrCodeGeneratorPageOutput @@ -433,77 +312,6 @@ "ObjectInpaintingPageOutput", "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", - "PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem", - "PostV3ArtQrCodeAsyncFormRequestScheduler", - "PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem", - "PostV3ArtQrCodeAsyncFormRequestSelectedModel", - "PostV3AsrAsyncFormRequestOutputFormat", - "PostV3AsrAsyncFormRequestSelectedModel", - "PostV3AsrAsyncFormRequestTranslationModel", - "PostV3BulkEvalAsyncFormRequestResponseFormatType", - 
"PostV3BulkEvalAsyncFormRequestSelectedModel", - "PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem", - "PostV3CompareLlmAsyncFormRequestResponseFormatType", - "PostV3CompareLlmAsyncFormRequestSelectedModelsItem", - "PostV3CompareText2ImgAsyncFormRequestScheduler", - "PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem", - "PostV3DeforumSdAsyncFormRequestSelectedModel", - "PostV3DocExtractAsyncFormRequestResponseFormatType", - "PostV3DocExtractAsyncFormRequestSelectedAsrModel", - "PostV3DocExtractAsyncFormRequestSelectedModel", - "PostV3DocSearchAsyncFormRequestCitationStyle", - "PostV3DocSearchAsyncFormRequestEmbeddingModel", - "PostV3DocSearchAsyncFormRequestKeywordQuery", - "PostV3DocSearchAsyncFormRequestResponseFormatType", - "PostV3DocSearchAsyncFormRequestSelectedModel", - "PostV3DocSummaryAsyncFormRequestResponseFormatType", - "PostV3DocSummaryAsyncFormRequestSelectedAsrModel", - "PostV3DocSummaryAsyncFormRequestSelectedModel", - "PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel", - "PostV3EmbeddingsAsyncFormRequestSelectedModel", - "PostV3FaceInpaintingAsyncFormRequestSelectedModel", - "PostV3GoogleGptAsyncFormRequestEmbeddingModel", - "PostV3GoogleGptAsyncFormRequestResponseFormatType", - "PostV3GoogleGptAsyncFormRequestSelectedModel", - "PostV3GoogleImageGenAsyncFormRequestSelectedModel", - "PostV3ImageSegmentationAsyncFormRequestSelectedModel", - "PostV3Img2ImgAsyncFormRequestSelectedControlnetModel", - "PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem", - "PostV3Img2ImgAsyncFormRequestSelectedModel", - "PostV3LipsyncAsyncFormRequestSelectedModel", - "PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel", - "PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName", - "PostV3LipsyncTtsAsyncFormRequestSelectedModel", - "PostV3LipsyncTtsAsyncFormRequestTtsProvider", - "PostV3ObjectInpaintingAsyncFormRequestSelectedModel", - "PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel", - "PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType", - 
"PostV3RelatedQnaMakerAsyncFormRequestSelectedModel", - "PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle", - "PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel", - "PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery", - "PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType", - "PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel", - "PostV3SeoSummaryAsyncFormRequestResponseFormatType", - "PostV3SeoSummaryAsyncFormRequestSelectedModel", - "PostV3SmartGptAsyncFormRequestResponseFormatType", - "PostV3SmartGptAsyncFormRequestSelectedModel", - "PostV3SocialLookupEmailAsyncFormRequestResponseFormatType", - "PostV3SocialLookupEmailAsyncFormRequestSelectedModel", - "PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel", - "PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName", - "PostV3TextToSpeechAsyncFormRequestTtsProvider", - "PostV3TranslateAsyncFormRequestSelectedModel", - "PostV3VideoBotsAsyncFormRequestAsrModel", - "PostV3VideoBotsAsyncFormRequestCitationStyle", - "PostV3VideoBotsAsyncFormRequestEmbeddingModel", - "PostV3VideoBotsAsyncFormRequestLipsyncModel", - "PostV3VideoBotsAsyncFormRequestOpenaiTtsModel", - "PostV3VideoBotsAsyncFormRequestOpenaiVoiceName", - "PostV3VideoBotsAsyncFormRequestResponseFormatType", - "PostV3VideoBotsAsyncFormRequestSelectedModel", - "PostV3VideoBotsAsyncFormRequestTranslationModel", - "PostV3VideoBotsAsyncFormRequestTtsProvider", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", diff --git a/src/gooey/types/post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item.py b/src/gooey/types/post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item.py deleted file mode 100644 index 6a287e6..0000000 --- a/src/gooey/types/post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item.py +++ /dev/null @@ -1,20 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem = typing.Union[ - typing.Literal[ - "sd_controlnet_canny", - "sd_controlnet_depth", - "sd_controlnet_hed", - "sd_controlnet_mlsd", - "sd_controlnet_normal", - "sd_controlnet_openpose", - "sd_controlnet_scribble", - "sd_controlnet_seg", - "sd_controlnet_tile", - "sd_controlnet_brightness", - "control_v1p_sd15_qrcode_monster_v2", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3art_qr_code_async_form_request_scheduler.py b/src/gooey/types/post_v3art_qr_code_async_form_request_scheduler.py deleted file mode 100644 index fb1ad97..0000000 --- a/src/gooey/types/post_v3art_qr_code_async_form_request_scheduler.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3ArtQrCodeAsyncFormRequestScheduler = typing.Union[ - typing.Literal[ - "singlestep_dpm_solver", - "multistep_dpm_solver", - "dpm_sde", - "dpm_discrete", - "dpm_discrete_ancestral", - "unipc", - "lms_discrete", - "heun", - "euler", - "euler_ancestral", - "pndm", - "ddpm", - "ddim", - "deis", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3art_qr_code_async_form_request_selected_controlnet_model_item.py b/src/gooey/types/post_v3art_qr_code_async_form_request_selected_controlnet_model_item.py deleted file mode 100644 index b36bff7..0000000 --- a/src/gooey/types/post_v3art_qr_code_async_form_request_selected_controlnet_model_item.py +++ /dev/null @@ -1,20 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem = typing.Union[ - typing.Literal[ - "sd_controlnet_canny", - "sd_controlnet_depth", - "sd_controlnet_hed", - "sd_controlnet_mlsd", - "sd_controlnet_normal", - "sd_controlnet_openpose", - "sd_controlnet_scribble", - "sd_controlnet_seg", - "sd_controlnet_tile", - "sd_controlnet_brightness", - "control_v1p_sd15_qrcode_monster_v2", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3art_qr_code_async_form_request_selected_model.py b/src/gooey/types/post_v3art_qr_code_async_form_request_selected_model.py deleted file mode 100644 index 5334908..0000000 --- a/src/gooey/types/post_v3art_qr_code_async_form_request_selected_model.py +++ /dev/null @@ -1,22 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3ArtQrCodeAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "dream_shaper", - "dreamlike_2", - "sd_2", - "sd_1_5", - "dall_e", - "dall_e_3", - "openjourney_2", - "openjourney", - "analog_diffusion", - "protogen_5_3", - "jack_qiao", - "rodent_diffusion_1_5", - "deepfloyd_if", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3asr_async_form_request_output_format.py b/src/gooey/types/post_v3asr_async_form_request_output_format.py deleted file mode 100644 index dad1d11..0000000 --- a/src/gooey/types/post_v3asr_async_form_request_output_format.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3AsrAsyncFormRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/types/post_v3asr_async_form_request_selected_model.py b/src/gooey/types/post_v3asr_async_form_request_selected_model.py deleted file mode 100644 index 270207d..0000000 --- a/src/gooey/types/post_v3asr_async_form_request_selected_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3AsrAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3asr_async_form_request_translation_model.py b/src/gooey/types/post_v3asr_async_form_request_translation_model.py deleted file mode 100644 index 2a42d5c..0000000 --- a/src/gooey/types/post_v3asr_async_form_request_translation_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3AsrAsyncFormRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/post_v3bulk_eval_async_form_request_response_format_type.py b/src/gooey/types/post_v3bulk_eval_async_form_request_response_format_type.py deleted file mode 100644 index 05fa75a..0000000 --- a/src/gooey/types/post_v3bulk_eval_async_form_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3BulkEvalAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3bulk_eval_async_form_request_selected_model.py b/src/gooey/types/post_v3bulk_eval_async_form_request_selected_model.py deleted file mode 100644 index d70df69..0000000 --- a/src/gooey/types/post_v3bulk_eval_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3BulkEvalAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3compare_ai_upscalers_async_form_request_selected_models_item.py b/src/gooey/types/post_v3compare_ai_upscalers_async_form_request_selected_models_item.py deleted file mode 100644 index 7d53c0d..0000000 --- a/src/gooey/types/post_v3compare_ai_upscalers_async_form_request_selected_models_item.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem = typing.Union[ - typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any -] diff --git a/src/gooey/types/post_v3compare_llm_async_form_request_response_format_type.py b/src/gooey/types/post_v3compare_llm_async_form_request_response_format_type.py deleted file mode 100644 index b7f1372..0000000 --- a/src/gooey/types/post_v3compare_llm_async_form_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3CompareLlmAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3compare_llm_async_form_request_selected_models_item.py b/src/gooey/types/post_v3compare_llm_async_form_request_selected_models_item.py deleted file mode 100644 index 58f240c..0000000 --- a/src/gooey/types/post_v3compare_llm_async_form_request_selected_models_item.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3CompareLlmAsyncFormRequestSelectedModelsItem = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3compare_text2img_async_form_request_scheduler.py b/src/gooey/types/post_v3compare_text2img_async_form_request_scheduler.py deleted file mode 100644 index ea82032..0000000 --- a/src/gooey/types/post_v3compare_text2img_async_form_request_scheduler.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3CompareText2ImgAsyncFormRequestScheduler = typing.Union[ - typing.Literal[ - "singlestep_dpm_solver", - "multistep_dpm_solver", - "dpm_sde", - "dpm_discrete", - "dpm_discrete_ancestral", - "unipc", - "lms_discrete", - "heun", - "euler", - "euler_ancestral", - "pndm", - "ddpm", - "ddim", - "deis", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3compare_text2img_async_form_request_selected_models_item.py b/src/gooey/types/post_v3compare_text2img_async_form_request_selected_models_item.py deleted file mode 100644 index abe78ed..0000000 --- a/src/gooey/types/post_v3compare_text2img_async_form_request_selected_models_item.py +++ /dev/null @@ -1,22 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem = typing.Union[ - typing.Literal[ - "dream_shaper", - "dreamlike_2", - "sd_2", - "sd_1_5", - "dall_e", - "dall_e_3", - "openjourney_2", - "openjourney", - "analog_diffusion", - "protogen_5_3", - "jack_qiao", - "rodent_diffusion_1_5", - "deepfloyd_if", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3deforum_sd_async_form_request_selected_model.py b/src/gooey/types/post_v3deforum_sd_async_form_request_selected_model.py deleted file mode 100644 index 8561b6f..0000000 --- a/src/gooey/types/post_v3deforum_sd_async_form_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3DeforumSdAsyncFormRequestSelectedModel = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any] diff --git a/src/gooey/types/post_v3doc_extract_async_form_request_response_format_type.py b/src/gooey/types/post_v3doc_extract_async_form_request_response_format_type.py deleted file mode 100644 index 759e46c..0000000 --- a/src/gooey/types/post_v3doc_extract_async_form_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3DocExtractAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3doc_extract_async_form_request_selected_asr_model.py b/src/gooey/types/post_v3doc_extract_async_form_request_selected_asr_model.py deleted file mode 100644 index 7ac96ae..0000000 --- a/src/gooey/types/post_v3doc_extract_async_form_request_selected_asr_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3DocExtractAsyncFormRequestSelectedAsrModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3doc_extract_async_form_request_selected_model.py b/src/gooey/types/post_v3doc_extract_async_form_request_selected_model.py deleted file mode 100644 index 7d72e68..0000000 --- a/src/gooey/types/post_v3doc_extract_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3DocExtractAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3doc_search_async_form_request_citation_style.py b/src/gooey/types/post_v3doc_search_async_form_request_citation_style.py deleted file mode 100644 index cf1bb3c..0000000 --- a/src/gooey/types/post_v3doc_search_async_form_request_citation_style.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3DocSearchAsyncFormRequestCitationStyle = typing.Union[ - typing.Literal[ - "number", - "title", - "url", - "symbol", - "markdown", - "html", - "slack_mrkdwn", - "plaintext", - "number_markdown", - "number_html", - "number_slack_mrkdwn", - "number_plaintext", - "symbol_markdown", - "symbol_html", - "symbol_slack_mrkdwn", - "symbol_plaintext", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3doc_search_async_form_request_embedding_model.py b/src/gooey/types/post_v3doc_search_async_form_request_embedding_model.py deleted file mode 100644 index 642358a..0000000 --- a/src/gooey/types/post_v3doc_search_async_form_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3DocSearchAsyncFormRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3doc_search_async_form_request_keyword_query.py b/src/gooey/types/post_v3doc_search_async_form_request_keyword_query.py deleted file mode 100644 index 47e1ead..0000000 --- a/src/gooey/types/post_v3doc_search_async_form_request_keyword_query.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3DocSearchAsyncFormRequestKeywordQuery = typing.Union[str, typing.List[str]] diff --git a/src/gooey/types/post_v3doc_search_async_form_request_response_format_type.py b/src/gooey/types/post_v3doc_search_async_form_request_response_format_type.py deleted file mode 100644 index 852b2c4..0000000 --- a/src/gooey/types/post_v3doc_search_async_form_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3DocSearchAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3doc_search_async_form_request_selected_model.py b/src/gooey/types/post_v3doc_search_async_form_request_selected_model.py deleted file mode 100644 index dff6941..0000000 --- a/src/gooey/types/post_v3doc_search_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3DocSearchAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3doc_summary_async_form_request_response_format_type.py b/src/gooey/types/post_v3doc_summary_async_form_request_response_format_type.py deleted file mode 100644 index 6d4d724..0000000 --- a/src/gooey/types/post_v3doc_summary_async_form_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3DocSummaryAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3doc_summary_async_form_request_selected_asr_model.py b/src/gooey/types/post_v3doc_summary_async_form_request_selected_asr_model.py deleted file mode 100644 index a696f62..0000000 --- a/src/gooey/types/post_v3doc_summary_async_form_request_selected_asr_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3DocSummaryAsyncFormRequestSelectedAsrModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3doc_summary_async_form_request_selected_model.py b/src/gooey/types/post_v3doc_summary_async_form_request_selected_model.py deleted file mode 100644 index 599fac8..0000000 --- a/src/gooey/types/post_v3doc_summary_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3DocSummaryAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3email_face_inpainting_async_form_request_selected_model.py b/src/gooey/types/post_v3email_face_inpainting_async_form_request_selected_model.py deleted file mode 100644 index 1ae0620..0000000 --- a/src/gooey/types/post_v3email_face_inpainting_async_form_request_selected_model.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any -] diff --git a/src/gooey/types/post_v3embeddings_async_form_request_selected_model.py b/src/gooey/types/post_v3embeddings_async_form_request_selected_model.py deleted file mode 100644 index c5bdb16..0000000 --- a/src/gooey/types/post_v3embeddings_async_form_request_selected_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3EmbeddingsAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3face_inpainting_async_form_request_selected_model.py b/src/gooey/types/post_v3face_inpainting_async_form_request_selected_model.py deleted file mode 100644 index 2824ac1..0000000 --- a/src/gooey/types/post_v3face_inpainting_async_form_request_selected_model.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3FaceInpaintingAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any -] diff --git a/src/gooey/types/post_v3google_gpt_async_form_request_embedding_model.py b/src/gooey/types/post_v3google_gpt_async_form_request_embedding_model.py deleted file mode 100644 index f6a3714..0000000 --- a/src/gooey/types/post_v3google_gpt_async_form_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3GoogleGptAsyncFormRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3google_gpt_async_form_request_response_format_type.py b/src/gooey/types/post_v3google_gpt_async_form_request_response_format_type.py deleted file mode 100644 index b9e609e..0000000 --- a/src/gooey/types/post_v3google_gpt_async_form_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3GoogleGptAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3google_gpt_async_form_request_selected_model.py b/src/gooey/types/post_v3google_gpt_async_form_request_selected_model.py deleted file mode 100644 index da55a8d..0000000 --- a/src/gooey/types/post_v3google_gpt_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3GoogleGptAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3google_image_gen_async_form_request_selected_model.py b/src/gooey/types/post_v3google_image_gen_async_form_request_selected_model.py deleted file mode 100644 index 4cc02ae..0000000 --- a/src/gooey/types/post_v3google_image_gen_async_form_request_selected_model.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3GoogleImageGenAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "dream_shaper", - "dreamlike_2", - "sd_2", - "sd_1_5", - "dall_e", - "instruct_pix2pix", - "openjourney_2", - "openjourney", - "analog_diffusion", - "protogen_5_3", - "jack_qiao", - "rodent_diffusion_1_5", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3image_segmentation_async_form_request_selected_model.py b/src/gooey/types/post_v3image_segmentation_async_form_request_selected_model.py deleted file mode 100644 index 2ce98fe..0000000 --- a/src/gooey/types/post_v3image_segmentation_async_form_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3ImageSegmentationAsyncFormRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model.py b/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model.py deleted file mode 100644 index 8605c47..0000000 --- a/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from .post_v3img2img_async_form_request_selected_controlnet_model_item import ( - PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem, -) - -PostV3Img2ImgAsyncFormRequestSelectedControlnetModel = typing.Union[ - typing.List[PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem], - typing.Literal["sd_controlnet_canny"], - typing.Literal["sd_controlnet_depth"], - typing.Literal["sd_controlnet_hed"], - typing.Literal["sd_controlnet_mlsd"], - typing.Literal["sd_controlnet_normal"], - typing.Literal["sd_controlnet_openpose"], - typing.Literal["sd_controlnet_scribble"], - typing.Literal["sd_controlnet_seg"], - typing.Literal["sd_controlnet_tile"], - typing.Literal["sd_controlnet_brightness"], - typing.Literal["control_v1p_sd15_qrcode_monster_v2"], -] diff --git a/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model_item.py b/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model_item.py deleted file mode 100644 index e56303b..0000000 --- a/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model_item.py +++ /dev/null @@ -1,20 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem = typing.Union[ - typing.Literal[ - "sd_controlnet_canny", - "sd_controlnet_depth", - "sd_controlnet_hed", - "sd_controlnet_mlsd", - "sd_controlnet_normal", - "sd_controlnet_openpose", - "sd_controlnet_scribble", - "sd_controlnet_seg", - "sd_controlnet_tile", - "sd_controlnet_brightness", - "control_v1p_sd15_qrcode_monster_v2", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3img2img_async_form_request_selected_model.py b/src/gooey/types/post_v3img2img_async_form_request_selected_model.py deleted file mode 100644 index 6218c7b..0000000 --- a/src/gooey/types/post_v3img2img_async_form_request_selected_model.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3Img2ImgAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "dream_shaper", - "dreamlike_2", - "sd_2", - "sd_1_5", - "dall_e", - "instruct_pix2pix", - "openjourney_2", - "openjourney", - "analog_diffusion", - "protogen_5_3", - "jack_qiao", - "rodent_diffusion_1_5", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3lipsync_async_form_request_selected_model.py b/src/gooey/types/post_v3lipsync_async_form_request_selected_model.py deleted file mode 100644 index 19c0255..0000000 --- a/src/gooey/types/post_v3lipsync_async_form_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3LipsyncAsyncFormRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_tts_model.py b/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_tts_model.py deleted file mode 100644 index a0eff55..0000000 --- a/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_voice_name.py b/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_voice_name.py deleted file mode 100644 index 2bbbd54..0000000 --- a/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_voice_name.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/gooey/types/post_v3lipsync_tts_async_form_request_selected_model.py b/src/gooey/types/post_v3lipsync_tts_async_form_request_selected_model.py deleted file mode 100644 index bcfe20e..0000000 --- a/src/gooey/types/post_v3lipsync_tts_async_form_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3LipsyncTtsAsyncFormRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/post_v3lipsync_tts_async_form_request_tts_provider.py b/src/gooey/types/post_v3lipsync_tts_async_form_request_tts_provider.py deleted file mode 100644 index 25be098..0000000 --- a/src/gooey/types/post_v3lipsync_tts_async_form_request_tts_provider.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3LipsyncTtsAsyncFormRequestTtsProvider = typing.Union[ - typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any -] diff --git a/src/gooey/types/post_v3object_inpainting_async_form_request_selected_model.py b/src/gooey/types/post_v3object_inpainting_async_form_request_selected_model.py deleted file mode 100644 index 8dec227..0000000 --- a/src/gooey/types/post_v3object_inpainting_async_form_request_selected_model.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3ObjectInpaintingAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any -] diff --git a/src/gooey/types/post_v3related_qna_maker_async_form_request_embedding_model.py b/src/gooey/types/post_v3related_qna_maker_async_form_request_embedding_model.py deleted file mode 100644 index 9390765..0000000 --- a/src/gooey/types/post_v3related_qna_maker_async_form_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3related_qna_maker_async_form_request_response_format_type.py b/src/gooey/types/post_v3related_qna_maker_async_form_request_response_format_type.py deleted file mode 100644 index 9e6ca22..0000000 --- a/src/gooey/types/post_v3related_qna_maker_async_form_request_response_format_type.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType = typing.Union[ - typing.Literal["text", "json_object"], typing.Any -] diff --git a/src/gooey/types/post_v3related_qna_maker_async_form_request_selected_model.py b/src/gooey/types/post_v3related_qna_maker_async_form_request_selected_model.py deleted file mode 100644 index 1000455..0000000 --- a/src/gooey/types/post_v3related_qna_maker_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3RelatedQnaMakerAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_citation_style.py b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_citation_style.py deleted file mode 100644 index f391080..0000000 --- a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_citation_style.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle = typing.Union[ - typing.Literal[ - "number", - "title", - "url", - "symbol", - "markdown", - "html", - "slack_mrkdwn", - "plaintext", - "number_markdown", - "number_html", - "number_slack_mrkdwn", - "number_plaintext", - "symbol_markdown", - "symbol_html", - "symbol_slack_mrkdwn", - "symbol_plaintext", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_embedding_model.py b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_embedding_model.py deleted file mode 100644 index 3af393d..0000000 --- a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_keyword_query.py b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_keyword_query.py deleted file mode 100644 index 3268b32..0000000 --- a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_keyword_query.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery = typing.Union[str, typing.List[str]] diff --git a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_response_format_type.py b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_response_format_type.py deleted file mode 100644 index 732cda0..0000000 --- a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_response_format_type.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType = typing.Union[ - typing.Literal["text", "json_object"], typing.Any -] diff --git a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_selected_model.py b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_selected_model.py deleted file mode 100644 index 7fbdfaf..0000000 --- a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3seo_summary_async_form_request_response_format_type.py b/src/gooey/types/post_v3seo_summary_async_form_request_response_format_type.py deleted file mode 100644 index a16607e..0000000 --- a/src/gooey/types/post_v3seo_summary_async_form_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3SeoSummaryAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3seo_summary_async_form_request_selected_model.py b/src/gooey/types/post_v3seo_summary_async_form_request_selected_model.py deleted file mode 100644 index eb67839..0000000 --- a/src/gooey/types/post_v3seo_summary_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3SeoSummaryAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3smart_gpt_async_form_request_response_format_type.py b/src/gooey/types/post_v3smart_gpt_async_form_request_response_format_type.py deleted file mode 100644 index 2ec153e..0000000 --- a/src/gooey/types/post_v3smart_gpt_async_form_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3SmartGptAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3smart_gpt_async_form_request_selected_model.py b/src/gooey/types/post_v3smart_gpt_async_form_request_selected_model.py deleted file mode 100644 index 70d34bc..0000000 --- a/src/gooey/types/post_v3smart_gpt_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3SmartGptAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3social_lookup_email_async_form_request_response_format_type.py b/src/gooey/types/post_v3social_lookup_email_async_form_request_response_format_type.py deleted file mode 100644 index d65902e..0000000 --- a/src/gooey/types/post_v3social_lookup_email_async_form_request_response_format_type.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3SocialLookupEmailAsyncFormRequestResponseFormatType = typing.Union[ - typing.Literal["text", "json_object"], typing.Any -] diff --git a/src/gooey/types/post_v3social_lookup_email_async_form_request_selected_model.py b/src/gooey/types/post_v3social_lookup_email_async_form_request_selected_model.py deleted file mode 100644 index 9defab1..0000000 --- a/src/gooey/types/post_v3social_lookup_email_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3SocialLookupEmailAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3text_to_speech_async_form_request_openai_tts_model.py b/src/gooey/types/post_v3text_to_speech_async_form_request_openai_tts_model.py deleted file mode 100644 index 5b996d2..0000000 --- a/src/gooey/types/post_v3text_to_speech_async_form_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/post_v3text_to_speech_async_form_request_openai_voice_name.py b/src/gooey/types/post_v3text_to_speech_async_form_request_openai_voice_name.py deleted file mode 100644 index 5e87d41..0000000 --- a/src/gooey/types/post_v3text_to_speech_async_form_request_openai_voice_name.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/gooey/types/post_v3text_to_speech_async_form_request_tts_provider.py b/src/gooey/types/post_v3text_to_speech_async_form_request_tts_provider.py deleted file mode 100644 index 066bcc0..0000000 --- a/src/gooey/types/post_v3text_to_speech_async_form_request_tts_provider.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3TextToSpeechAsyncFormRequestTtsProvider = typing.Union[ - typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any -] diff --git a/src/gooey/types/post_v3translate_async_form_request_selected_model.py b/src/gooey/types/post_v3translate_async_form_request_selected_model.py deleted file mode 100644 index 8314363..0000000 --- a/src/gooey/types/post_v3translate_async_form_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3TranslateAsyncFormRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_asr_model.py b/src/gooey/types/post_v3video_bots_async_form_request_asr_model.py deleted file mode 100644 index bc1c1e4..0000000 --- a/src/gooey/types/post_v3video_bots_async_form_request_asr_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3VideoBotsAsyncFormRequestAsrModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_citation_style.py b/src/gooey/types/post_v3video_bots_async_form_request_citation_style.py deleted file mode 100644 index b98a7c6..0000000 --- a/src/gooey/types/post_v3video_bots_async_form_request_citation_style.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3VideoBotsAsyncFormRequestCitationStyle = typing.Union[ - typing.Literal[ - "number", - "title", - "url", - "symbol", - "markdown", - "html", - "slack_mrkdwn", - "plaintext", - "number_markdown", - "number_html", - "number_slack_mrkdwn", - "number_plaintext", - "symbol_markdown", - "symbol_html", - "symbol_slack_mrkdwn", - "symbol_plaintext", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_embedding_model.py b/src/gooey/types/post_v3video_bots_async_form_request_embedding_model.py deleted file mode 100644 index bd68603..0000000 --- a/src/gooey/types/post_v3video_bots_async_form_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3VideoBotsAsyncFormRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_lipsync_model.py b/src/gooey/types/post_v3video_bots_async_form_request_lipsync_model.py deleted file mode 100644 index 13db430..0000000 --- a/src/gooey/types/post_v3video_bots_async_form_request_lipsync_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3VideoBotsAsyncFormRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_openai_tts_model.py b/src/gooey/types/post_v3video_bots_async_form_request_openai_tts_model.py deleted file mode 100644 index 5a921f4..0000000 --- a/src/gooey/types/post_v3video_bots_async_form_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3VideoBotsAsyncFormRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_openai_voice_name.py b/src/gooey/types/post_v3video_bots_async_form_request_openai_voice_name.py deleted file mode 100644 index b945a73..0000000 --- a/src/gooey/types/post_v3video_bots_async_form_request_openai_voice_name.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3VideoBotsAsyncFormRequestOpenaiVoiceName = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_response_format_type.py b/src/gooey/types/post_v3video_bots_async_form_request_response_format_type.py deleted file mode 100644 index 8b486eb..0000000 --- a/src/gooey/types/post_v3video_bots_async_form_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3VideoBotsAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_selected_model.py b/src/gooey/types/post_v3video_bots_async_form_request_selected_model.py deleted file mode 100644 index 8448c26..0000000 --- a/src/gooey/types/post_v3video_bots_async_form_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostV3VideoBotsAsyncFormRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_translation_model.py b/src/gooey/types/post_v3video_bots_async_form_request_translation_model.py deleted file mode 100644 index 3c7d0ae..0000000 --- a/src/gooey/types/post_v3video_bots_async_form_request_translation_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3VideoBotsAsyncFormRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_tts_provider.py b/src/gooey/types/post_v3video_bots_async_form_request_tts_provider.py deleted file mode 100644 index c223beb..0000000 --- a/src/gooey/types/post_v3video_bots_async_form_request_tts_provider.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PostV3VideoBotsAsyncFormRequestTtsProvider = typing.Union[ - typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any -]