From 021be769804fb0c6a0a69bfe43c97661092f38c7 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 10:48:58 +0000 Subject: [PATCH] Release 0.0.1-beta12 --- README.md | 28 +- pyproject.toml | 2 +- reference.md | 5842 +++++++++++-- src/gooey/__init__.py | 228 +- src/gooey/client.py | 7552 ++++++++++++++--- .../copilot_for_your_enterprise/__init__.py | 47 + .../copilot_for_your_enterprise/client.py | 745 ++ .../types/__init__.py | 47 + .../async_video_bots_request_asr_model.py | 23 + ...async_video_bots_request_citation_style.py | 25 + ...sync_video_bots_request_embedding_model.py | 18 + ...async_video_bots_request_functions_item.py | 24 + ...deo_bots_request_functions_item_trigger.py | 5 + .../async_video_bots_request_lipsync_model.py | 5 + .../async_video_bots_request_messages_item.py | 23 + ...ideo_bots_request_messages_item_content.py | 6 + ...bots_request_messages_item_content_item.py | 41 + ...c_video_bots_request_messages_item_role.py | 5 + ...ync_video_bots_request_openai_tts_model.py | 5 + ...nc_video_bots_request_openai_voice_name.py | 7 + ...video_bots_request_response_format_type.py | 5 + ...c_video_bots_request_sadtalker_settings.py | 40 + ...s_request_sadtalker_settings_preprocess.py | 7 + ...async_video_bots_request_selected_model.py | 47 + ...nc_video_bots_request_translation_model.py | 5 + .../async_video_bots_request_tts_provider.py | 7 + src/gooey/core/client_wrapper.py | 2 +- src/gooey/errors/__init__.py | 3 +- src/gooey/errors/too_many_requests_error.py | 9 + src/gooey/evaluator/__init__.py | 21 + src/gooey/evaluator/client.py | 342 + src/gooey/evaluator/types/__init__.py | 19 + ...lk_eval_page_request_agg_functions_item.py | 4 +- ...age_request_agg_functions_item_function.py | 0 ...ulk_eval_page_request_eval_prompts_item.py | 4 +- .../bulk_eval_page_request_functions_item.py | 4 +- ...val_page_request_functions_item_trigger.py | 0 ..._eval_page_request_response_format_type.py | 0 .../bulk_eval_page_request_selected_model.py | 0 src/gooey/functions/__init__.py | 2 + src/gooey/functions/client.py | 231 + src/gooey/lip_syncing/__init__.py | 17 + src/gooey/lip_syncing/client.py | 305 + src/gooey/lip_syncing/types/__init__.py | 15 + .../async_lipsync_request_functions_item.py | 24 + ..._lipsync_request_functions_item_trigger.py | 5 + ...sync_lipsync_request_sadtalker_settings.py | 40 + ...c_request_sadtalker_settings_preprocess.py | 7 + .../async_lipsync_request_selected_model.py | 5 + src/gooey/smart_gpt/__init__.py | 15 + src/gooey/smart_gpt/client.py | 324 + src/gooey/smart_gpt/types/__init__.py | 13 + .../smart_gpt_page_request_functions_item.py | 4 +- ...gpt_page_request_functions_item_trigger.py | 0 ...t_gpt_page_request_response_format_type.py | 0 .../smart_gpt_page_request_selected_model.py | 0 src/gooey/types/__init__.py | 160 +- src/gooey/types/bulk_eval_page_request.py | 56 - .../types/bulk_run_request_functions_item.py | 24 + ...bulk_run_request_functions_item_trigger.py | 5 + src/gooey/types/compare_llm_page_request.py | 37 - .../types/compare_text2img_page_request.py | 44 - src/gooey/types/deforum_sd_page_request.py | 41 - src/gooey/types/doc_search_page_request.py | 56 - .../doc_summary_request_functions_item.py | 24 + ..._summary_request_functions_item_trigger.py | 5 + ...oc_summary_request_response_format_type.py | 5 + .../doc_summary_request_selected_asr_model.py | 23 + .../doc_summary_request_selected_model.py | 47 + .../email_face_inpainting_page_request.py | 51 - 
src/gooey/types/embeddings_page_request.py | 30 - src/gooey/types/google_gpt_page_request.py | 66 - .../types/google_image_gen_page_request.py | 46 - .../lipsync_tts_request_functions_item.py | 24 + ...sync_tts_request_functions_item_trigger.py | 5 + .../lipsync_tts_request_openai_tts_model.py | 5 + .../lipsync_tts_request_openai_voice_name.py | 7 + .../lipsync_tts_request_sadtalker_settings.py | 40 + ...s_request_sadtalker_settings_preprocess.py | 7 + .../lipsync_tts_request_selected_model.py | 5 + .../types/lipsync_tts_request_tts_provider.py | 7 + .../types/portrait_request_functions_item.py | 24 + ...portrait_request_functions_item_trigger.py | 5 + .../types/portrait_request_selected_model.py | 5 + .../product_image_request_functions_item.py | 24 + ...ct_image_request_functions_item_trigger.py | 5 + .../product_image_request_selected_model.py | 5 + .../types/qr_code_request_functions_item.py | 24 + .../qr_code_request_functions_item_trigger.py | 5 + ...est_image_prompt_controlnet_models_item.py | 20 + .../types/qr_code_request_qr_code_vcard.py | 44 + src/gooey/types/qr_code_request_scheduler.py | 23 + ..._request_selected_controlnet_model_item.py | 20 + .../types/qr_code_request_selected_model.py | 22 + .../types/related_qn_a_doc_page_request.py | 70 - src/gooey/types/related_qn_a_page_request.py | 66 - .../remix_image_request_functions_item.py | 24 + ...ix_image_request_functions_item_trigger.py | 5 + ...image_request_selected_controlnet_model.py | 19 + ..._request_selected_controlnet_model_item.py | 20 + .../remix_image_request_selected_model.py | 21 + ...emove_background_request_functions_item.py | 24 + ...ckground_request_functions_item_trigger.py | 5 + ...emove_background_request_selected_model.py | 5 + src/gooey/types/seo_summary_page_request.py | 52 - src/gooey/types/smart_gpt_page_request.py | 40 - .../types/social_lookup_email_page_request.py | 38 - ...ech_recognition_request_functions_item.py} | 18 +- ...ognition_request_functions_item_trigger.py | 5 + ...peech_recognition_request_output_format.py | 5 + ...eech_recognition_request_selected_model.py | 23 + ...h_recognition_request_translation_model.py | 5 + .../synthesize_data_request_functions_item.py | 24 + ...ize_data_request_functions_item_trigger.py | 5 + ...esize_data_request_response_format_type.py | 5 + ...thesize_data_request_selected_asr_model.py | 23 + .../synthesize_data_request_selected_model.py | 47 + src/gooey/types/text2audio_page_request.py | 36 - .../types/text_to_speech_page_request.py | 53 - .../types/translate_request_functions_item.py | 24 + ...ranslate_request_functions_item_trigger.py | 5 + .../types/translate_request_selected_model.py | 5 + .../types/upscale_request_functions_item.py | 24 + .../upscale_request_functions_item_trigger.py | 5 + .../upscale_request_selected_models_item.py | 7 + 125 files changed, 15421 insertions(+), 2537 deletions(-) create mode 100644 src/gooey/copilot_for_your_enterprise/__init__.py create mode 100644 src/gooey/copilot_for_your_enterprise/client.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/__init__.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_asr_model.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_citation_style.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_embedding_model.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item.py create mode 100644 
src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item_trigger.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_lipsync_model.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content_item.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_role.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_tts_model.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_voice_name.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_response_format_type.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings_preprocess.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_selected_model.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_translation_model.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_tts_provider.py create mode 100644 src/gooey/errors/too_many_requests_error.py create mode 100644 src/gooey/evaluator/__init__.py create mode 100644 src/gooey/evaluator/client.py create mode 100644 src/gooey/evaluator/types/__init__.py rename src/gooey/{ => evaluator}/types/bulk_eval_page_request_agg_functions_item.py (85%) rename src/gooey/{ => evaluator}/types/bulk_eval_page_request_agg_functions_item_function.py (100%) rename src/gooey/{ => evaluator}/types/bulk_eval_page_request_eval_prompts_item.py (81%) rename src/gooey/{ => evaluator}/types/bulk_eval_page_request_functions_item.py (86%) rename src/gooey/{ => evaluator}/types/bulk_eval_page_request_functions_item_trigger.py (100%) rename src/gooey/{ => evaluator}/types/bulk_eval_page_request_response_format_type.py (100%) rename src/gooey/{ => evaluator}/types/bulk_eval_page_request_selected_model.py (100%) create mode 100644 src/gooey/functions/__init__.py create mode 100644 src/gooey/functions/client.py create mode 100644 src/gooey/lip_syncing/__init__.py create mode 100644 src/gooey/lip_syncing/client.py create mode 100644 src/gooey/lip_syncing/types/__init__.py create mode 100644 src/gooey/lip_syncing/types/async_lipsync_request_functions_item.py create mode 100644 src/gooey/lip_syncing/types/async_lipsync_request_functions_item_trigger.py create mode 100644 src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings.py create mode 100644 src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings_preprocess.py create mode 100644 src/gooey/lip_syncing/types/async_lipsync_request_selected_model.py create mode 100644 src/gooey/smart_gpt/__init__.py create mode 100644 src/gooey/smart_gpt/client.py create mode 100644 src/gooey/smart_gpt/types/__init__.py rename src/gooey/{ => smart_gpt}/types/smart_gpt_page_request_functions_item.py (86%) rename src/gooey/{ => smart_gpt}/types/smart_gpt_page_request_functions_item_trigger.py (100%) rename src/gooey/{ => smart_gpt}/types/smart_gpt_page_request_response_format_type.py (100%) rename 
src/gooey/{ => smart_gpt}/types/smart_gpt_page_request_selected_model.py (100%) delete mode 100644 src/gooey/types/bulk_eval_page_request.py create mode 100644 src/gooey/types/bulk_run_request_functions_item.py create mode 100644 src/gooey/types/bulk_run_request_functions_item_trigger.py delete mode 100644 src/gooey/types/compare_llm_page_request.py delete mode 100644 src/gooey/types/compare_text2img_page_request.py delete mode 100644 src/gooey/types/deforum_sd_page_request.py delete mode 100644 src/gooey/types/doc_search_page_request.py create mode 100644 src/gooey/types/doc_summary_request_functions_item.py create mode 100644 src/gooey/types/doc_summary_request_functions_item_trigger.py create mode 100644 src/gooey/types/doc_summary_request_response_format_type.py create mode 100644 src/gooey/types/doc_summary_request_selected_asr_model.py create mode 100644 src/gooey/types/doc_summary_request_selected_model.py delete mode 100644 src/gooey/types/email_face_inpainting_page_request.py delete mode 100644 src/gooey/types/embeddings_page_request.py delete mode 100644 src/gooey/types/google_gpt_page_request.py delete mode 100644 src/gooey/types/google_image_gen_page_request.py create mode 100644 src/gooey/types/lipsync_tts_request_functions_item.py create mode 100644 src/gooey/types/lipsync_tts_request_functions_item_trigger.py create mode 100644 src/gooey/types/lipsync_tts_request_openai_tts_model.py create mode 100644 src/gooey/types/lipsync_tts_request_openai_voice_name.py create mode 100644 src/gooey/types/lipsync_tts_request_sadtalker_settings.py create mode 100644 src/gooey/types/lipsync_tts_request_sadtalker_settings_preprocess.py create mode 100644 src/gooey/types/lipsync_tts_request_selected_model.py create mode 100644 src/gooey/types/lipsync_tts_request_tts_provider.py create mode 100644 src/gooey/types/portrait_request_functions_item.py create mode 100644 src/gooey/types/portrait_request_functions_item_trigger.py create mode 100644 src/gooey/types/portrait_request_selected_model.py create mode 100644 src/gooey/types/product_image_request_functions_item.py create mode 100644 src/gooey/types/product_image_request_functions_item_trigger.py create mode 100644 src/gooey/types/product_image_request_selected_model.py create mode 100644 src/gooey/types/qr_code_request_functions_item.py create mode 100644 src/gooey/types/qr_code_request_functions_item_trigger.py create mode 100644 src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py create mode 100644 src/gooey/types/qr_code_request_qr_code_vcard.py create mode 100644 src/gooey/types/qr_code_request_scheduler.py create mode 100644 src/gooey/types/qr_code_request_selected_controlnet_model_item.py create mode 100644 src/gooey/types/qr_code_request_selected_model.py delete mode 100644 src/gooey/types/related_qn_a_doc_page_request.py delete mode 100644 src/gooey/types/related_qn_a_page_request.py create mode 100644 src/gooey/types/remix_image_request_functions_item.py create mode 100644 src/gooey/types/remix_image_request_functions_item_trigger.py create mode 100644 src/gooey/types/remix_image_request_selected_controlnet_model.py create mode 100644 src/gooey/types/remix_image_request_selected_controlnet_model_item.py create mode 100644 src/gooey/types/remix_image_request_selected_model.py create mode 100644 src/gooey/types/remove_background_request_functions_item.py create mode 100644 src/gooey/types/remove_background_request_functions_item_trigger.py create mode 100644 
src/gooey/types/remove_background_request_selected_model.py delete mode 100644 src/gooey/types/seo_summary_page_request.py delete mode 100644 src/gooey/types/smart_gpt_page_request.py delete mode 100644 src/gooey/types/social_lookup_email_page_request.py rename src/gooey/types/{functions_page_request.py => speech_recognition_request_functions_item.py} (56%) create mode 100644 src/gooey/types/speech_recognition_request_functions_item_trigger.py create mode 100644 src/gooey/types/speech_recognition_request_output_format.py create mode 100644 src/gooey/types/speech_recognition_request_selected_model.py create mode 100644 src/gooey/types/speech_recognition_request_translation_model.py create mode 100644 src/gooey/types/synthesize_data_request_functions_item.py create mode 100644 src/gooey/types/synthesize_data_request_functions_item_trigger.py create mode 100644 src/gooey/types/synthesize_data_request_response_format_type.py create mode 100644 src/gooey/types/synthesize_data_request_selected_asr_model.py create mode 100644 src/gooey/types/synthesize_data_request_selected_model.py delete mode 100644 src/gooey/types/text2audio_page_request.py delete mode 100644 src/gooey/types/text_to_speech_page_request.py create mode 100644 src/gooey/types/translate_request_functions_item.py create mode 100644 src/gooey/types/translate_request_functions_item_trigger.py create mode 100644 src/gooey/types/translate_request_selected_model.py create mode 100644 src/gooey/types/upscale_request_functions_item.py create mode 100644 src/gooey/types/upscale_request_functions_item_trigger.py create mode 100644 src/gooey/types/upscale_request_selected_models_item.py diff --git a/README.md b/README.md index 19ff2e0..8569e77 100644 --- a/README.md +++ b/README.md @@ -16,12 +16,19 @@ pip install gooeyai Instantiate and use the client with the following: ```python -from gooey import Gooey +from gooey import DeforumSdPageRequestAnimationPromptsItem, Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3video_bots_async() +client.animate( + animation_prompts=[ + DeforumSdPageRequestAnimationPromptsItem( + frame="frame", + prompt="prompt", + ) + ], +) ``` ## Async Client @@ -31,7 +38,7 @@ The SDK also exports an `async` client so that you can make non-blocking calls t ```python import asyncio -from gooey import AsyncGooey +from gooey import AsyncGooey, DeforumSdPageRequestAnimationPromptsItem client = AsyncGooey( api_key="YOUR_API_KEY", @@ -39,7 +46,14 @@ client = AsyncGooey( async def main() -> None: - await client.post_v3video_bots_async() + await client.animate( + animation_prompts=[ + DeforumSdPageRequestAnimationPromptsItem( + frame="frame", + prompt="prompt", + ) + ], + ) asyncio.run(main()) @@ -54,7 +68,7 @@ will be thrown. from gooey.core.api_error import ApiError try: - client.post_v3video_bots_async(...) + client.animate(...) except ApiError as e: print(e.status_code) print(e.body) @@ -77,7 +91,7 @@ A request is deemed retriable when any of the following HTTP status codes is ret Use the `max_retries` request option to configure this behavior. 
```python -client.post_v3video_bots_async(..., { +client.animate(..., { "max_retries": 1 }) ``` @@ -97,7 +111,7 @@ client = Gooey( # Override timeout for a specific method -client.post_v3video_bots_async(..., { +client.animate(..., { "timeout_in_seconds": 1 }) ``` diff --git a/pyproject.toml b/pyproject.toml index 3a513bd..989ad22 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gooeyai" -version = "0.0.1-beta11" +version = "0.0.1-beta12" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 1b4756d..b905fb3 100644 --- a/reference.md +++ b/reference.md @@ -1,5 +1,5 @@ # Reference -
client.post_v3video_bots_async() +
client.animate(...)
@@ -12,12 +12,19 @@
```python -from gooey import Gooey +from gooey import DeforumSdPageRequestAnimationPromptsItem, Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3video_bots_async() +client.animate( + animation_prompts=[ + DeforumSdPageRequestAnimationPromptsItem( + frame="frame", + prompt="prompt", + ) + ], +) ```
@@ -33,95 +40,131 @@ client.post_v3video_bots_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**animation_prompts:** `typing.Sequence[DeforumSdPageRequestAnimationPromptsItem]`
- -
+
+
+**example_id:** `typing.Optional[str]` +
-
-
client.post_v3deforum_sd_async()
-#### 🔌 Usage +**functions:** `typing.Optional[typing.Sequence[DeforumSdPageRequestFunctionsItem]]` + +
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3deforum_sd_async() - -``` +**max_frames:** `typing.Optional[int]` +
+ +
+
+ +**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]` +
-#### ⚙️ Parameters -
+**animation_mode:** `typing.Optional[str]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**zoom:** `typing.Optional[str]`
+ +
+
+ +**translation_x:** `typing.Optional[str]` +
+
+
+**translation_y:** `typing.Optional[str]` +
-
-
client.post_v3art_qr_code_async()
-#### 🔌 Usage +**rotation3d_x:** `typing.Optional[str]` + +
+
+**rotation3d_y:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey +**rotation3d_z:** `typing.Optional[str]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3art_qr_code_async() +
+
-``` +**fps:** `typing.Optional[int]` +
+ +
+
+ +**seed:** `typing.Optional[int]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
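Putting the documented fields together, a minimal sketch of a keyframed `animate` call; the frame numbers, prompts, and numeric values below are placeholders, not values from the reference.

```python
from gooey import DeforumSdPageRequestAnimationPromptsItem, Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

# Two keyframed prompts (frame indices are passed as strings, as in the
# generated example above); max_frames, fps and seed are plain ints.
result = client.animate(
    animation_prompts=[
        DeforumSdPageRequestAnimationPromptsItem(frame="0", prompt="a misty forest at dawn"),
        DeforumSdPageRequestAnimationPromptsItem(frame="60", prompt="sunlight breaking through the trees"),
    ],
    max_frames=120,  # placeholder length
    fps=12,          # placeholder frame rate
    seed=42,         # fix the seed for repeatable runs
)
print(result)
```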
@@ -137,7 +180,7 @@ client.post_v3art_qr_code_async()
-
client.post_v3related_qna_maker_async() +
client.qr_code(...)
@@ -155,7 +198,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3related_qna_maker_async() +client.qr_code( + text_prompt="text_prompt", +) ```
@@ -171,233 +216,247 @@ client.post_v3related_qna_maker_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**text_prompt:** `str`
- -
+
+
+**example_id:** `typing.Optional[str]` +
-
-
client.post_v3seo_summary_async()
-#### 🔌 Usage +**functions:** `typing.Optional[typing.List[QrCodeRequestFunctionsItem]]` + +
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3seo_summary_async() - -``` -
-
+**qr_code_data:** `typing.Optional[str]` + -#### ⚙️ Parameters -
-
-
+**qr_code_input_image:** `from __future__ import annotations -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +typing.Optional[core.File]` — See core.File for more documentation
-
-
+
+
+**qr_code_vcard:** `typing.Optional[QrCodeRequestQrCodeVcard]` +
-
-
client.post_v3google_gpt_async()
-#### 🔌 Usage +**qr_code_file:** `from __future__ import annotations -
-
+typing.Optional[core.File]` — See core.File for more documentation + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3google_gpt_async() - -``` -
-
+**use_url_shortener:** `typing.Optional[bool]` +
-#### ⚙️ Parameters -
+**negative_prompt:** `typing.Optional[str]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**image_prompt:** `typing.Optional[str]`
- - +
+
+**image_prompt_controlnet_models:** `typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]` +
-
-
client.post_v3social_lookup_email_async()
-#### 🔌 Usage +**image_prompt_strength:** `typing.Optional[float]` + +
+
+**image_prompt_scale:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3social_lookup_email_async() - -``` -
-
+**image_prompt_pos_x:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**image_prompt_pos_y:** `typing.Optional[float]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**selected_model:** `typing.Optional[QrCodeRequestSelectedModel]`
- - +
+
+**selected_controlnet_model:** `typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]` +
-
-
client.post_v3bulk_runner_async()
-#### 🔌 Usage +**output_width:** `typing.Optional[int]` + +
+
+**output_height:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3bulk_runner_async() - -``` -
-
+**guidance_scale:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**num_outputs:** `typing.Optional[int]`
- - +
+
+**quality:** `typing.Optional[int]` +
-
-
client.post_v3bulk_eval_async()
-#### 🔌 Usage +**scheduler:** `typing.Optional[QrCodeRequestScheduler]` + +
+
+**seed:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey +**obj_scale:** `typing.Optional[float]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3bulk_eval_async() +
+
-``` +**obj_pos_x:** `typing.Optional[float]` +
+ +
+
+ +**obj_pos_y:** `typing.Optional[float]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
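A minimal sketch of a `qr_code` call that also sets the QR payload and output size; only parameters listed above are used, and the URL and dimensions are placeholders.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

# qr_code_data carries the payload to encode; text_prompt styles the artwork.
result = client.qr_code(
    text_prompt="a watercolor garden of blue flowers",
    qr_code_data="https://gooey.ai",  # placeholder payload
    use_url_shortener=True,
    output_width=1024,                # placeholder dimensions
    output_height=1024,
    num_outputs=1,
)
print(result)
```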
@@ -413,7 +472,7 @@ client.post_v3bulk_eval_async()
-
client.post_v3doc_extract_async() +
client.seo_people_also_ask(...)
@@ -431,7 +490,10 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3doc_extract_async() +client.seo_people_also_ask( + search_query="search_query", + site_filter="site_filter", +) ```
@@ -447,141 +509,208 @@ client.post_v3doc_extract_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**search_query:** `str`
- -
+
+
+**site_filter:** `str` +
-
-
client.post_v3compare_llm_async()
-#### 🔌 Usage +**example_id:** `typing.Optional[str]` + +
+
+**functions:** `typing.Optional[typing.Sequence[RelatedQnAPageRequestFunctionsItem]]` + +
+
+
-```python -from gooey import Gooey +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3compare_llm_async() +
+
-``` +**task_instructions:** `typing.Optional[str]` +
+ +
+
+ +**query_instructions:** `typing.Optional[str]` +
-#### ⚙️ Parameters -
+**selected_model:** `typing.Optional[RelatedQnAPageRequestSelectedModel]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**max_search_urls:** `typing.Optional[int]`
+ +
+
+ +**max_references:** `typing.Optional[int]` +
+
+
+**max_context_words:** `typing.Optional[int]` +
-
-
client.post_v3doc_search_async()
-#### 🔌 Usage +**scroll_jump:** `typing.Optional[int]` + +
+
+**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]` + +
+
+
-```python
-from gooey import Gooey
+**dense_weight:** `typing.Optional[float]`
-client = Gooey(
-    api_key="YOUR_API_KEY",
-)
-client.post_v3doc_search_async()
-```
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` +
-#### ⚙️ Parameters -
+**num_outputs:** `typing.Optional[int]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**quality:** `typing.Optional[float]`
+ +
+
+ +**max_tokens:** `typing.Optional[int]` +
+
+
+**sampling_temperature:** `typing.Optional[float]` +
-
-
client.post_v3smart_gpt_async()
-#### 🔌 Usage +**response_format_type:** `typing.Optional[RelatedQnAPageRequestResponseFormatType]` + +
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]` + +
+
+
-```python -from gooey import Gooey +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3smart_gpt_async() +
+
-``` +**serp_search_type:** `typing.Optional[SerpSearchType]` +
+ +
+
+ +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
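A minimal sketch showing how the retrieval knobs above can be combined; `dense_weight=0.5` mirrors the equal-weight setting described in its docstring, and the other values are placeholders.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

result = client.seo_people_also_ask(
    search_query="how to make sourdough bread",  # placeholder query
    site_filter="example.com",                   # placeholder site restriction
    max_references=4,        # cap the number of cited sources
    max_context_words=1000,  # cap the context passed to the model
    dense_weight=0.5,        # equal weight for dense and sparse embeddings
)
print(result)
```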
@@ -597,7 +726,7 @@ client.post_v3smart_gpt_async()
-
client.post_v3doc_summary_async() +
client.seo_content(...)
@@ -615,7 +744,12 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3doc_summary_async() +client.seo_content( + search_query="search_query", + keywords="keywords", + title="title", + company_url="company_url", +) ```
@@ -631,141 +765,179 @@ client.post_v3doc_summary_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**search_query:** `str`
- -
+
+
+**keywords:** `str` +
-
-
client.post_v3functions_async()
-#### 🔌 Usage +**title:** `str` + +
+
+**company_url:** `str` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3functions_async() - -``` -
-
+**example_id:** `typing.Optional[str]` + -#### ⚙️ Parameters -
+**task_instructions:** `typing.Optional[str]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**enable_html:** `typing.Optional[bool]`
- - +
+
+**selected_model:** `typing.Optional[SeoSummaryPageRequestSelectedModel]` +
-
-
client.post_v3lipsync_async()
-#### 🔌 Usage +**max_search_urls:** `typing.Optional[int]` + +
+
+**enable_crosslinks:** `typing.Optional[bool]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3lipsync_async() - -``` +**seed:** `typing.Optional[int]` +
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` +
-#### ⚙️ Parameters -
+**num_outputs:** `typing.Optional[int]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**quality:** `typing.Optional[float]`
+ +
+
+ +**max_tokens:** `typing.Optional[int]` +
+
+
+**sampling_temperature:** `typing.Optional[float]` +
-
-
client.post_v3lipsync_tts_async()
-#### 🔌 Usage +**response_format_type:** `typing.Optional[SeoSummaryPageRequestResponseFormatType]` + +
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]` + +
+
+
-```python -from gooey import Gooey +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3lipsync_tts_async() +
+
-``` +**serp_search_type:** `typing.Optional[SerpSearchType]` +
+ +
+
+ +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -781,7 +953,7 @@ client.post_v3lipsync_tts_async()
-
client.post_v3text_to_speech_async() +
client.web_search_llm(...)
@@ -799,7 +971,10 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3text_to_speech_async() +client.web_search_llm( + search_query="search_query", + site_filter="site_filter", +) ```
@@ -815,141 +990,208 @@ client.post_v3text_to_speech_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**search_query:** `str`
- -
+
+
+**site_filter:** `str` +
-
-
client.post_v3asr_async()
-#### 🔌 Usage +**example_id:** `typing.Optional[str]` + +
+
+**functions:** `typing.Optional[typing.Sequence[GoogleGptPageRequestFunctionsItem]]` + +
+
+
-```python -from gooey import Gooey +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3asr_async() +
+
-``` +**task_instructions:** `typing.Optional[str]` +
+ +
+
+ +**query_instructions:** `typing.Optional[str]` +
-#### ⚙️ Parameters -
+**selected_model:** `typing.Optional[GoogleGptPageRequestSelectedModel]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**max_search_urls:** `typing.Optional[int]`
+ +
+
+ +**max_references:** `typing.Optional[int]` +
+
+
+**max_context_words:** `typing.Optional[int]` +
-
-
client.post_v3text2audio_async()
-#### 🔌 Usage +**scroll_jump:** `typing.Optional[int]` + +
+
+**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]` + +
+
+
-```python
-from gooey import Gooey
+**dense_weight:** `typing.Optional[float]`
-client = Gooey(
-    api_key="YOUR_API_KEY",
-)
-client.post_v3text2audio_async()
-```
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` +
-#### ⚙️ Parameters -
+**num_outputs:** `typing.Optional[int]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**quality:** `typing.Optional[float]`
+ +
+
+ +**max_tokens:** `typing.Optional[int]` +
+
+
+**sampling_temperature:** `typing.Optional[float]` +
-
-
client.post_v3translate_async()
-#### 🔌 Usage +**response_format_type:** `typing.Optional[GoogleGptPageRequestResponseFormatType]` + +
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]` + +
+
+
-```python -from gooey import Gooey +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3translate_async() +
+
-``` +**serp_search_type:** `typing.Optional[SerpSearchType]` +
+ +
+
+ +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -965,7 +1207,7 @@ client.post_v3translate_async()
-
client.post_v3img2img_async() +
client.personalize_email(...)
@@ -983,7 +1225,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3img2img_async() +client.personalize_email( + email_address="email_address", +) ```
@@ -999,95 +1243,107 @@ client.post_v3img2img_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**email_address:** `str`
- -
+
+
+**example_id:** `typing.Optional[str]` +
-
-
client.post_v3compare_text2img_async()
-#### 🔌 Usage +**functions:** `typing.Optional[typing.Sequence[SocialLookupEmailPageRequestFunctionsItem]]` + +
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3compare_text2img_async() - -``` -
-
+**input_prompt:** `typing.Optional[str]` + -#### ⚙️ Parameters -
+**selected_model:** `typing.Optional[SocialLookupEmailPageRequestSelectedModel]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**avoid_repetition:** `typing.Optional[bool]`
- - +
+
+**num_outputs:** `typing.Optional[int]` +
-
-
client.post_v3object_inpainting_async()
-#### 🔌 Usage +**quality:** `typing.Optional[float]` + +
+
+**max_tokens:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3object_inpainting_async() - -``` +**sampling_temperature:** `typing.Optional[float]` +
+ +
+
+ +**response_format_type:** `typing.Optional[SocialLookupEmailPageRequestResponseFormatType]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
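A minimal sketch of `personalize_email` that passes `variables` for the Jinja templating described above; the template variable name and all values are illustrative only.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

# variables are exposed as Jinja template values per the parameter description
# above; "company" is a hypothetical variable name, not part of the reference.
result = client.personalize_email(
    email_address="jane@example.com",
    input_prompt="Write a short intro email to {{ company }} about our analytics product.",
    variables={"company": "Example Corp"},
    max_tokens=256,
)
print(result)
```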
@@ -1103,7 +1359,7 @@ client.post_v3object_inpainting_async()
-
client.post_v3face_inpainting_async() +
client.bulk_run(...)
@@ -1121,7 +1377,11 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3face_inpainting_async() +client.bulk_run( + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, +) ```
@@ -1137,49 +1397,94 @@ client.post_v3face_inpainting_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**documents:** `from __future__ import annotations + +typing.List[core.File]` — See core.File for more documentation
+ +
+
+ +**run_urls:** `typing.List[str]` + + +Provide one or more Gooey.AI workflow runs. +You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. + +
+
+
+ +**input_columns:** `typing.Dict[str, str]` + +For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + +
-
-
client.post_v3email_face_inpainting_async()
-#### 🔌 Usage +**output_columns:** `typing.Dict[str, str]` + + +For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. + + +
+
+**example_id:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey +**functions:** `typing.Optional[typing.List[BulkRunRequestFunctionsItem]]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3email_face_inpainting_async() +
+
-``` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
+ +
+
+ +**eval_urls:** `typing.Optional[typing.List[str]]` + + +_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
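A fuller `bulk_run` sketch; the run URL, the CSV file, and the column names are hypothetical, and the assumption that `core.File` parameters accept a binary file handle is not stated in the reference.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

# Map workflow input/output fields to columns in the uploaded spreadsheet.
# "input_prompt"/"Question" and "output_text"/"Answer" are hypothetical names.
with open("questions.csv", "rb") as f:  # assumed: core.File accepts a binary file handle
    result = client.bulk_run(
        documents=[f],
        run_urls=["https://gooey.ai/copilot/?run_id=YOUR_RUN_ID"],  # placeholder run URL
        input_columns={"input_prompt": "Question"},
        output_columns={"output_text": "Answer"},
        eval_urls=["https://gooey.ai/bulk-eval/?run_id=YOUR_EVAL_ID"],  # optional evaluator run
    )
print(result)
```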
@@ -1195,7 +1500,7 @@ client.post_v3email_face_inpainting_async()
-
client.post_v3google_image_gen_async() +
client.synthesize_data(...)
@@ -1213,7 +1518,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3google_image_gen_async() +client.synthesize_data() ```
@@ -1229,95 +1534,145 @@ client.post_v3google_image_gen_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**documents:** `from __future__ import annotations + +typing.List[core.File]` — See core.File for more documentation
- -
+
+
+**example_id:** `typing.Optional[str]` +
-
-
client.post_v3image_segmentation_async()
-#### 🔌 Usage +**functions:** `typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]]` + +
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3image_segmentation_async() +**sheet_url:** `from __future__ import annotations -``` +typing.Optional[core.File]` — See core.File for more documentation +
+ +
+
+ +**selected_asr_model:** `typing.Optional[SynthesizeDataRequestSelectedAsrModel]` +
-#### ⚙️ Parameters -
+**google_translate_target:** `typing.Optional[str]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**glossary_document:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
+ +
+
+ +**task_instructions:** `typing.Optional[str]` +
+
+
+**selected_model:** `typing.Optional[SynthesizeDataRequestSelectedModel]` +
-
-
client.post_v3compare_ai_upscalers_async()
-#### 🔌 Usage +**avoid_repetition:** `typing.Optional[bool]` + +
+
+**num_outputs:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey +**quality:** `typing.Optional[float]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3compare_ai_upscalers_async() +
+
-``` +**max_tokens:** `typing.Optional[int]` +
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` +
-#### ⚙️ Parameters +
+
+ +**response_format_type:** `typing.Optional[SynthesizeDataRequestResponseFormatType]` + +
+
+**settings:** `typing.Optional[RunSettings]` + +
+
+
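A minimal sketch of `synthesize_data` with the `documents` field populated; it assumes (the reference does not say) that `core.File` parameters accept a binary file handle, and the file name and instructions are placeholders.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

# Upload a source document and describe the extraction task.
with open("interview_notes.pdf", "rb") as f:  # assumed file-handle upload
    result = client.synthesize_data(
        documents=[f],
        task_instructions="Extract each speaker's name and their main argument as JSON.",
        max_tokens=1024,
    )
print(result)
```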
@@ -1333,7 +1688,7 @@ client.post_v3compare_ai_upscalers_async()
-
client.post_v3chyron_plant_async() +
client.llm(...)
@@ -1351,7 +1706,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3chyron_plant_async() +client.llm() ```
@@ -1367,95 +1722,99 @@ client.post_v3chyron_plant_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**example_id:** `typing.Optional[str]`
- -
+
+
+**functions:** `typing.Optional[typing.Sequence[CompareLlmPageRequestFunctionsItem]]` +
-
-
client.post_v3letter_writer_async()
-#### 🔌 Usage +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+**input_prompt:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3letter_writer_async() - -``` -
-
+**selected_models:** `typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]` + -#### ⚙️ Parameters -
+**avoid_repetition:** `typing.Optional[bool]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**num_outputs:** `typing.Optional[int]`
- - +
+
+**quality:** `typing.Optional[float]` +
-
-
client.post_v3embeddings_async()
-#### 🔌 Usage +**max_tokens:** `typing.Optional[int]` + +
+
+**sampling_temperature:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3embeddings_async() - -``` -
-
+**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]` + -#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
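A minimal sketch of an `llm` call using only the plainly typed parameters above; the prompt and sampling values are placeholders.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

result = client.llm(
    input_prompt="Summarize the plot of Hamlet in three sentences.",
    max_tokens=256,            # placeholder budget
    sampling_temperature=0.3,  # lower temperature for a more deterministic summary
    num_outputs=1,
)
print(result)
```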
@@ -1471,7 +1830,7 @@ client.post_v3embeddings_async()
-
client.post_v3related_qna_maker_doc_async() +
client.rag(...)
@@ -1489,7 +1848,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3related_qna_maker_doc_async() +client.rag( + search_query="search_query", +) ```
@@ -1505,102 +1866,116 @@ client.post_v3related_qna_maker_doc_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**search_query:** `str`
- -
+
+
+**example_id:** `typing.Optional[str]` +
-
-
client.health_status_get()
-#### 🔌 Usage +**functions:** `typing.Optional[typing.Sequence[DocSearchPageRequestFunctionsItem]]` + +
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.health_status_get() - -``` -
-
+**keyword_query:** `typing.Optional[DocSearchPageRequestKeywordQuery]` + -#### ⚙️ Parameters -
+**documents:** `typing.Optional[typing.Sequence[str]]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**max_references:** `typing.Optional[int]`
+ +
+
+ +**max_context_words:** `typing.Optional[int]` +
+
+
+**scroll_jump:** `typing.Optional[int]` +
-
-## CopilotIntegrations -
client.copilot_integrations.video_bots_stream_create(...)
-#### 🔌 Usage +**doc_extract_url:** `typing.Optional[str]` + +
+
+**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]` + +
+
+
-```python
-from gooey import Gooey
+**dense_weight:** `typing.Optional[float]`
-client = Gooey(
-    api_key="YOUR_API_KEY",
-)
-client.copilot_integrations.video_bots_stream_create(
-    integration_id="integration_id",
-)
-```
-
-
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
-#### ⚙️ Parameters
-
+**task_instructions:** `typing.Optional[str]` + +
+
+
-**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab +**query_instructions:** `typing.Optional[str]`
@@ -1608,13 +1983,7 @@ client.copilot_integrations.video_bots_stream_create(
-**conversation_id:** `typing.Optional[str]` - -The gooey conversation ID. - -If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. - -Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response. +**selected_model:** `typing.Optional[DocSearchPageRequestSelectedModel]`
@@ -1622,11 +1991,3195 @@ Note that you may not provide a custom ID here, and must only use the `conversat
-**user_id:** `typing.Optional[str]` +**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]` + +
+
-Your app's custom user ID. +
+
-If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation. +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[DocSearchPageRequestResponseFormatType]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+ + + + + + +
+ +
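A minimal sketch of a `rag` call; `documents` is a plain list of strings per the type above, and the URL, query, and limits shown are placeholders.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

result = client.rag(
    search_query="What does the warranty cover?",            # placeholder query
    documents=["https://example.com/warranty-policy.pdf"],   # placeholder document reference
    max_references=3,
    dense_weight=0.5,  # equal weight for dense and sparse embeddings
)
print(result)
```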
client.doc_summary(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.doc_summary() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**documents:** `from __future__ import annotations + +typing.List[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[DocSummaryRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**task_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**merge_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[DocSummaryRequestSelectedModel]` + +
+
+ +
+
+ +**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]` + +
+
+ +
+
+ +**selected_asr_model:** `typing.Optional[DocSummaryRequestSelectedAsrModel]` + +
+
+ +
+
+ +**google_translate_target:** `typing.Optional[str]` + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[DocSummaryRequestResponseFormatType]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
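A minimal sketch of `doc_summary` with the `documents` field populated; it assumes, though the reference does not say so, that `core.File` accepts a binary file handle, and it uses the documented `"map_reduce"` chain type.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

with open("quarterly_report.pdf", "rb") as f:  # assumed file-handle upload
    result = client.doc_summary(
        documents=[f],
        task_instructions="Summarize the key financial results in five bullet points.",
        chain_type="map_reduce",  # the only documented chain type
    )
print(result)
```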
client.lipsync_tts(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.lipsync_tts( + text_prompt="text_prompt", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**text_prompt:** `str` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**tts_provider:** `typing.Optional[LipsyncTtsRequestTtsProvider]` + +
+
+ +
+
+ +**uberduck_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**uberduck_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**google_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_pitch:** `typing.Optional[float]` + +
+
+ +
+
+ +**bark_history_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead + +
+
+ +
+
+ +**elevenlabs_api_key:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_model:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_stability:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_similarity_boost:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_style:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_speaker_boost:** `typing.Optional[bool]` + +
+
+ +
+
+ +**azure_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**openai_voice_name:** `typing.Optional[LipsyncTtsRequestOpenaiVoiceName]` + +
+
+ +
+
+ +**openai_tts_model:** `typing.Optional[LipsyncTtsRequestOpenaiTtsModel]` + +
+
+ +
+
+ +**input_face:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**face_padding_top:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_bottom:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_left:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_right:** `typing.Optional[int]` + +
+
+ +
+
+ +**sadtalker_settings:** `typing.Optional[LipsyncTtsRequestSadtalkerSettings]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[LipsyncTtsRequestSelectedModel]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
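A minimal sketch of `lipsync_tts` that supplies the face image; the binary file handle for the `core.File` parameter is an assumption, and the file name and padding values are placeholders.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

with open("speaker_face.png", "rb") as face:  # assumed file-handle upload
    result = client.lipsync_tts(
        text_prompt="Welcome to the quarterly all-hands meeting.",
        input_face=face,
        face_padding_top=10,     # placeholder padding values
        face_padding_bottom=10,
    )
print(result)
```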
client.text_to_speech(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.text_to_speech( + text_prompt="text_prompt", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**text_prompt:** `str` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]` + +
+
+ +
+
+ +**uberduck_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**uberduck_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**google_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_pitch:** `typing.Optional[float]` + +
+
+ +
+
+ +**bark_history_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead + +
+
+ +
+
+ +**elevenlabs_api_key:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_model:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_stability:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_similarity_boost:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_style:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_speaker_boost:** `typing.Optional[bool]` + +
+
+ +
+
+ +**azure_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]` + +
+
+ +
+
+ +**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.speech_recognition(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.speech_recognition() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**documents:** `from __future__ import annotations + +typing.List[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**selected_model:** `typing.Optional[SpeechRecognitionRequestSelectedModel]` + +
+
+ +
+
+ +**language:** `typing.Optional[str]` + +
+
+ +
+
+ +**translation_model:** `typing.Optional[SpeechRecognitionRequestTranslationModel]` + +
+
+ +
+
+ +**output_format:** `typing.Optional[SpeechRecognitionRequestOutputFormat]` + +
+
+ +
+
+ +**google_translate_target:** `typing.Optional[str]` — use `translation_model` & `translation_target` instead. + +
+
+ +
+
+ +**translation_source:** `typing.Optional[str]` + +
+
+ +
+
+ +**translation_target:** `typing.Optional[str]` + +
+
+ +
+
+ +**glossary_document:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
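A minimal sketch of `speech_recognition` with an audio file attached; the file-handle upload for `core.File` and the language value are assumptions, not part of the reference.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

with open("meeting_audio.wav", "rb") as audio:  # assumed file-handle upload
    result = client.speech_recognition(
        documents=[audio],
        language="en",  # placeholder; the reference only types this as a string
    )
print(result)
```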
client.text_to_music(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.text_to_music( + text_prompt="text_prompt", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**text_prompt:** `str` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**negative_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**duration_sec:** `typing.Optional[float]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[int]` + +
+
+ +
+
+ +**guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +
+
+ +
+
+ +**sd2upscaling:** `typing.Optional[bool]` + +
+
+ +
+
+ +**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.translate(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.translate() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[TranslateRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**texts:** `typing.Optional[typing.List[str]]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[TranslateRequestSelectedModel]` + +
+
+ +
+
+ +**translation_source:** `typing.Optional[str]` + +
+
+ +
+
+ +**translation_target:** `typing.Optional[str]` + +
+
+ +
+
+ +**glossary_document:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
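A minimal sketch of a `translate` call using only plain string parameters; the texts are placeholders and the language-code format is an assumption.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

result = client.translate(
    texts=["How are you?", "See you tomorrow."],
    translation_source="en",  # assumed code format; the reference types these as plain strings
    translation_target="hi",
)
print(result)
```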
client.remix_image(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.remix_image() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**input_image:** `from __future__ import annotations + +core.File` — See core.File for more documentation + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[RemixImageRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**text_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[RemixImageRequestSelectedModel]` + +
+
+ +
+
+ +**selected_controlnet_model:** `typing.Optional[RemixImageRequestSelectedControlnetModel]` + +
+
+ +
+
+ +**negative_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[int]` + +
+
+ +
+
+ +**output_width:** `typing.Optional[int]` + +
+
+ +
+
+ +**output_height:** `typing.Optional[int]` + +
+
+ +
+
+ +**guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**prompt_strength:** `typing.Optional[float]` + +
+
+ +
+
+ +**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]` + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +
+
+ +
+
+ +**image_guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
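A minimal sketch of `remix_image` with the required `input_image` supplied; the binary file handle for `core.File` is an assumption, and the prompt and strength values are placeholders.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

with open("living_room.png", "rb") as img:  # assumed file-handle upload
    result = client.remix_image(
        input_image=img,
        text_prompt="the same room redecorated in mid-century modern style",
        prompt_strength=0.6,  # placeholder value
        num_outputs=1,
    )
print(result)
```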
client.text_to_image(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.text_to_image( + text_prompt="text_prompt", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**text_prompt:** `str` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**negative_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**output_width:** `typing.Optional[int]` + +
+
+ +
+
+ +**output_height:** `typing.Optional[int]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[int]` + +
+
+ +
+
+ +**dall_e3quality:** `typing.Optional[str]` + +
+
+ +
+
+ +**dall_e3style:** `typing.Optional[str]` + +
+
+ +
+
+ +**guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +
+
+ +
+
+ +**sd2upscaling:** `typing.Optional[bool]` + +
+
+ +
+
+ +**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]` + +
+
+ +
+
+ +**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]` + +
+
+ +
+
+ +**edit_instruction:** `typing.Optional[str]` + +
+
+ +
+
+ +**image_guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
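+Beyond the required `text_prompt`, the numeric knobs documented above can be combined as in the sketch below; the values are illustrative and anything omitted falls back to the API defaults.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# Illustrative values only.
+client.text_to_image(
+    text_prompt="an isometric city at night",
+    negative_prompt="blurry, low quality",
+    num_outputs=2,
+    output_width=768,
+    output_height=768,
+    seed=42,  # fixing the seed makes repeated runs reproducible
+)
+```
+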
client.product_image(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.product_image( + text_prompt="text_prompt", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**input_image:** `from __future__ import annotations + +core.File` — See core.File for more documentation + +
+
+ +
+
+ +**text_prompt:** `str` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[ProductImageRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**obj_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**obj_pos_x:** `typing.Optional[float]` + +
+
+ +
+
+ +**obj_pos_y:** `typing.Optional[float]` + +
+
+ +
+
+ +**mask_threshold:** `typing.Optional[float]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[ProductImageRequestSelectedModel]` + +
+
+ +
+
+ +**negative_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[int]` + +
+
+ +
+
+ +**output_width:** `typing.Optional[int]` + +
+
+ +
+
+ +**output_height:** `typing.Optional[int]` + +
+
+ +
+
+ +**guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**sd2upscaling:** `typing.Optional[bool]` + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
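+A hypothetical product-shot call might look like the following. The assumption that an open binary file satisfies `core.File`, and the 0 to 1 convention used for the `obj_pos_*` placement values, are both assumptions to verify; they are not stated in this reference.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# Hypothetical product photo; obj_* values size and place the cut-out object in the generated scene.
+with open("bottle.png", "rb") as input_image:
+    client.product_image(
+        input_image=input_image,
+        text_prompt="on a marble countertop, soft studio lighting",
+        obj_scale=0.6,
+        obj_pos_x=0.5,
+        obj_pos_y=0.5,
+    )
+```
+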
client.portrait(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.portrait( + text_prompt="text_prompt", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**input_image:** `from __future__ import annotations + +core.File` — See core.File for more documentation + +
+
+ +
+
+ +**text_prompt:** `str` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[PortraitRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**face_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**face_pos_x:** `typing.Optional[float]` + +
+
+ +
+
+ +**face_pos_y:** `typing.Optional[float]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[PortraitRequestSelectedModel]` + +
+
+ +
+
+ +**negative_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[int]` + +
+
+ +
+
+ +**upscale_factor:** `typing.Optional[float]` + +
+
+ +
+
+ +**output_width:** `typing.Optional[int]` + +
+
+ +
+
+ +**output_height:** `typing.Optional[int]` + +
+
+ +
+
+ +**guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
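+A sketch along the same lines for portraits, again assuming an open binary file works as `core.File`; the face placement and upscale values are illustrative.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# Hypothetical face photo; defaults apply for anything omitted.
+with open("face.jpg", "rb") as input_image:
+    client.portrait(
+        input_image=input_image,
+        text_prompt="a renaissance oil painting",
+        face_scale=0.8,
+        upscale_factor=2.0,
+    )
+```
+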
client.image_from_email(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.image_from_email( + email_address="sean@dara.network", + text_prompt="winter's day in paris", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**text_prompt:** `str` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**email_address:** `typing.Optional[str]` + +
+
+ +
+
+ +**twitter_handle:** `typing.Optional[str]` + +
+
+ +
+
+ +**face_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**face_pos_x:** `typing.Optional[float]` + +
+
+ +
+
+ +**face_pos_y:** `typing.Optional[float]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]` + +
+
+ +
+
+ +**negative_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[int]` + +
+
+ +
+
+ +**upscale_factor:** `typing.Optional[float]` + +
+
+ +
+
+ +**output_width:** `typing.Optional[int]` + +
+
+ +
+
+ +**output_height:** `typing.Optional[int]` + +
+
+ +
+
+ +**guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**should_send_email:** `typing.Optional[bool]` + +
+
+ +
+
+ +**email_from:** `typing.Optional[str]` + +
+
+ +
+
+ +**email_cc:** `typing.Optional[str]` + +
+
+ +
+
+ +**email_bcc:** `typing.Optional[str]` + +
+
+ +
+
+ +**email_subject:** `typing.Optional[str]` + +
+
+ +
+
+ +**email_body:** `typing.Optional[str]` + +
+
+ +
+
+ +**email_body_enable_html:** `typing.Optional[bool]` + +
+
+ +
+
+ +**fallback_email_body:** `typing.Optional[str]` + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
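+Building on the example above, the email-delivery fields can be set so the generated image is sent to the given address; the subject and body below are placeholders.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# should_send_email controls whether the result is emailed; subject and body are illustrative.
+client.image_from_email(
+    email_address="sean@dara.network",
+    text_prompt="winter's day in paris",
+    should_send_email=True,
+    email_subject="Your generated portrait",
+    email_body="Here is the image you asked for.",
+)
+```
+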
client.image_from_web_search(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.image_from_web_search( + search_query="search_query", + text_prompt="text_prompt", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**search_query:** `str` + +
+
+ +
+
+ +**text_prompt:** `str` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**serp_search_location:** `typing.Optional[SerpSearchLocation]` + +
+
+ +
+
+ +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead + +
+
+ +
+
+ +**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]` + +
+
+ +
+
+ +**negative_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[int]` + +
+
+ +
+
+ +**guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**prompt_strength:** `typing.Optional[float]` + +
+
+ +
+
+ +**sd2upscaling:** `typing.Optional[bool]` + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +
+
+ +
+
+ +**image_guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.remove_background(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.remove_background() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**input_image:** `from __future__ import annotations + +core.File` — See core.File for more documentation + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**selected_model:** `typing.Optional[RemoveBackgroundRequestSelectedModel]` + +
+
+ +
+
+ +**mask_threshold:** `typing.Optional[float]` + +
+
+ +
+
+ +**rect_persepective_transform:** `typing.Optional[bool]` + +
+
+ +
+
+ +**reflection_opacity:** `typing.Optional[float]` + +
+
+ +
+
+ +**obj_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**obj_pos_x:** `typing.Optional[float]` + +
+
+ +
+
+ +**obj_pos_y:** `typing.Optional[float]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
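+Note that the generated snippet above omits `input_image`, which is typed as a required `core.File`. A sketch that supplies it, assuming an open binary file is accepted:
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# Hypothetical input; reflection_opacity is illustrative.
+with open("product.jpg", "rb") as input_image:
+    client.remove_background(
+        input_image=input_image,
+        rect_persepective_transform=True,  # field name spelled exactly as documented above
+        reflection_opacity=0.2,
+    )
+```
+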
client.upscale(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.upscale( + scale=1, +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**scale:** `int` — The final upsampling scale of the image + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[UpscaleRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**input_image:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**input_video:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**selected_models:** `typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]` + +
+
+ +
+
+ +**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
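+A sketch that also supplies the optional `input_image` (assuming an open binary file is accepted as `core.File`); `scale` is the final upsampling factor as documented above.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# Hypothetical low-resolution source image, upscaled 2x.
+with open("low_res.png", "rb") as input_image:
+    client.upscale(
+        scale=2,
+        input_image=input_image,
+    )
+```
+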
client.embed(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.embed( + texts=["texts"], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**texts:** `typing.Sequence[str]` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.seo_people_also_ask_doc(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.seo_people_also_ask_doc( + search_query="search_query", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**search_query:** `str` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]` + +
+
+ +
+
+ +**documents:** `typing.Optional[typing.Sequence[str]]` + +
+
+ +
+
+ +**max_references:** `typing.Optional[int]` + +
+
+ +
+
+ +**max_context_words:** `typing.Optional[int]` + +
+
+ +
+
+ +**scroll_jump:** `typing.Optional[int]` + +
+
+ +
+
+ +**doc_extract_url:** `typing.Optional[str]` + +
+
+ +
+
+ +**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]` + +
+
+ +
+
+ +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + +
+
+ +
+
+ +**task_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**query_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]` + +
+
+ +
+
+ +**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]` + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[RelatedQnADocPageRequestResponseFormatType]` + +
+
+ +
+
+ +**serp_search_location:** `typing.Optional[SerpSearchLocation]` + +
+
+ +
+
+ +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead + +
+
+ +
+
+ +**serp_search_type:** `typing.Optional[SerpSearchType]` + +
+
+ +
+
+ +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
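+To make the `dense_weight` description above concrete: `0.5` weights dense and sparse embeddings equally, values toward `0` favour keyword matching, and values toward `1` favour semantic matching. The query and document URL below are placeholders.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# Placeholder query and document; dense_weight=0.5 gives equal weight to dense and sparse retrieval.
+client.seo_people_also_ask_doc(
+    search_query="how to make sourdough bread",
+    documents=["https://example.com/sourdough-guide.pdf"],
+    dense_weight=0.5,
+)
+```
+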
client.health_status_get() +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.health_status_get() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## CopilotIntegrations +
client.copilot_integrations.video_bots_stream_create(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.copilot_integrations.video_bots_stream_create( + integration_id="integration_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab + +
+
+ +
+
+ +**conversation_id:** `typing.Optional[str]` + +The gooey conversation ID. + +If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. + +Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response. + +
+
+ +
+
+ +**user_id:** `typing.Optional[str]` + +Your app's custom user ID. + +If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user ID associated with that conversation.
@@ -1636,9 +5189,1110 @@ If not provided, a random user will be created and a new ID will be returned in **user_message_id:** `typing.Optional[str]` -Your app's custom message ID for the user message. +Your app's custom message ID for the user message. + +If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation. + +
+
+ +
+
+ +**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user. + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**input_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**input_audio:** `typing.Optional[str]` + +
+
+ +
+
+ +**input_images:** `typing.Optional[typing.Sequence[str]]` + +
+
+ +
+
+ +**input_documents:** `typing.Optional[typing.Sequence[str]]` + +
+
+ +
+
+ +**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]` + +
+
+ +
+
+ +**bot_script:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]` + +
+
+ +
+
+ +**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or PDF, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) + +
+
+ +
+
+ +**task_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**query_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**keyword_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**documents:** `typing.Optional[typing.Sequence[str]]` + +
+
+ +
+
+ +**max_references:** `typing.Optional[int]` + +
+
+ +
+
+ +**max_context_words:** `typing.Optional[int]` + +
+
+ +
+
+ +**scroll_jump:** `typing.Optional[int]` + +
+
+ +
+
+ +**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]` + +
+
+ +
+
+ +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + +
+
+ +
+
+ +**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]` + +
+
+ +
+
+ +**use_url_shortener:** `typing.Optional[bool]` + +
+
+ +
+
+ +**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. + +
+
+ +
+
+ +**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. + +
+
+ +
+
+ +**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]` + +
+
+ +
+
+ +**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. + +
+
+ +
+
+ +**input_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for User Language -> LLM Language (English) + + +
+
+ +
+
+ +**output_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for LLM Language (English) -> User Language + + +
+
+ +
+
+ +**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]` + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[CreateStreamRequestResponseFormatType]` + +
+
+ +
+
+ +**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]` + +
+
+ +
+
+ +**uberduck_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**uberduck_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**google_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_pitch:** `typing.Optional[float]` + +
+
+ +
+
+ +**bark_history_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead + +
+
+ +
+
+ +**elevenlabs_api_key:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_model:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_stability:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_similarity_boost:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_style:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_speaker_boost:** `typing.Optional[bool]` + +
+
+ +
+
+ +**azure_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]` + +
+
+ +
+
+ +**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]` + +
+
+ +
+
+ +**input_face:** `typing.Optional[str]` + +
+
+ +
+
+ +**face_padding_top:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_bottom:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_left:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_right:** `typing.Optional[int]` + +
+
+ +
+
+ +**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` + +
+
+ +
+
+ +**input_text:** `typing.Optional[str]` — Use `input_prompt` instead + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + + + +
+ +
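+A sketch of keeping conversation state across turns, as described for `conversation_id` above. The attribute access on the response is an assumption; check `CreateStreamResponse` for the exact field name.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# First turn: no conversation_id, so a new conversation is started.
+first = client.copilot_integrations.video_bots_stream_create(
+    integration_id="integration_id",
+    input_prompt="What are your store hours?",
+)
+# Follow-up turn: reuse the conversation_id returned by the first call.
+# Assumption: the ID is exposed as an attribute on the response object.
+client.copilot_integrations.video_bots_stream_create(
+    integration_id="integration_id",
+    conversation_id=first.conversation_id,
+    input_prompt="And on weekends?",
+)
+```
+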
client.copilot_integrations.video_bots_stream(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.copilot_integrations.video_bots_stream( + request_id="request_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## CopilotForYourEnterprise +
client.copilot_for_your_enterprise.async_video_bots(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.copilot_for_your_enterprise.async_video_bots() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[AsyncVideoBotsRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**input_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**input_audio:** `typing.Optional[str]` + +
+
+ +
+
+ +**input_images:** `from __future__ import annotations + +typing.Optional[typing.List[core.File]]` — See core.File for more documentation + +
+
+ +
+
+ +**input_documents:** `from __future__ import annotations + +typing.Optional[typing.List[core.File]]` — See core.File for more documentation + +
+
+ +
+
+ +**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.List[AsyncVideoBotsRequestMessagesItem]]` + +
+
+ +
+
+ +**bot_script:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[AsyncVideoBotsRequestSelectedModel]` + +
+
+ +
+
+ +**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or PDF, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) + +
+
+ +
+
+ +**task_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**query_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**keyword_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**documents:** `from __future__ import annotations + +typing.Optional[typing.List[core.File]]` — See core.File for more documentation + +
+
+ +
+
+ +**max_references:** `typing.Optional[int]` + +
+
+ +
+
+ +**max_context_words:** `typing.Optional[int]` + +
+
+ +
+
+ +**scroll_jump:** `typing.Optional[int]` + +
+
+ +
+
+ +**embedding_model:** `typing.Optional[AsyncVideoBotsRequestEmbeddingModel]` + +
+
+ +
+
+ +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + +
+
+ +
+
+ +**citation_style:** `typing.Optional[AsyncVideoBotsRequestCitationStyle]` + +
+
+ +
+
+ +**use_url_shortener:** `typing.Optional[bool]` + +
+
+ +
+
+ +**asr_model:** `typing.Optional[AsyncVideoBotsRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. + +
+
+ +
+
+ +**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. + +
+
+ +
+
+ +**translation_model:** `typing.Optional[AsyncVideoBotsRequestTranslationModel]` + +
+
+ +
+
-If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation. +**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. + +
+
+ +
+
+ +**input_glossary_document:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**output_glossary_document:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**lipsync_model:** `typing.Optional[AsyncVideoBotsRequestLipsyncModel]` + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.List[typing.Literal["json_to_pdf"]]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[AsyncVideoBotsRequestResponseFormatType]` + +
+
+ +
+
+ +**tts_provider:** `typing.Optional[AsyncVideoBotsRequestTtsProvider]` + +
+
+ +
+
+ +**uberduck_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**uberduck_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**google_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_pitch:** `typing.Optional[float]` + +
+
+ +
+
+ +**bark_history_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead + +
+
+ +
+
+ +**elevenlabs_api_key:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_model:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_stability:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_similarity_boost:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_style:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_speaker_boost:** `typing.Optional[bool]` + +
+
+ +
+
+ +**azure_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**openai_voice_name:** `typing.Optional[AsyncVideoBotsRequestOpenaiVoiceName]` + +
+
+ +
+
+ +**openai_tts_model:** `typing.Optional[AsyncVideoBotsRequestOpenaiTtsModel]` + +
+
+ +
+
+ +**input_face:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**face_padding_top:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_bottom:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_left:** `typing.Optional[int]`
@@ -1646,7 +6300,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user. +**face_padding_right:** `typing.Optional[int]`
@@ -1654,7 +6308,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**sadtalker_settings:** `typing.Optional[AsyncVideoBotsRequestSadtalkerSettings]`
@@ -1662,7 +6316,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**settings:** `typing.Optional[RunSettings]`
@@ -1670,31 +6324,62 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**input_prompt:** `typing.Optional[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**input_audio:** `typing.Optional[str]` -
+
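+A minimal sketch with an illustrative prompt and task instructions; every other parameter falls back to its default.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# Illustrative prompt and instructions only.
+client.copilot_for_your_enterprise.async_video_bots(
+    input_prompt="Summarize our refund policy in two sentences.",
+    task_instructions="Answer only from the provided documents.",
+)
+```
+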
+## Evaluator +
client.evaluator.async_bulk_eval(...)
-**input_images:** `typing.Optional[typing.Sequence[str]]` - +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.evaluator.async_bulk_eval( + documents=["documents"], +) + +```
+
+
+ +#### ⚙️ Parameters
-**input_documents:** `typing.Optional[typing.Sequence[str]]` +
+
+ +**documents:** `typing.Sequence[str]` + + +Upload or link to a CSV or Google Sheet that contains your sample input data. +For example, for Copilot this would be sample questions, or for Art QR Code, pairs of image descriptions and URLs. +Remember to include header names in your CSV too. +
@@ -1702,7 +6387,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. +**example_id:** `typing.Optional[str]`
@@ -1710,7 +6395,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]` +**functions:** `typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]]`
@@ -1718,7 +6403,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**bot_script:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1726,7 +6411,12 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]` +**eval_prompts:** `typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]]` + + +Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. +_The `columns` dictionary can be used to reference the spreadsheet columns._ +
@@ -1734,7 +6424,11 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) +**agg_functions:** `typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]]` + + +Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). +
@@ -1742,7 +6436,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**task_instructions:** `typing.Optional[str]` +**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]`
@@ -1750,7 +6444,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**query_instructions:** `typing.Optional[str]` +**avoid_repetition:** `typing.Optional[bool]`
@@ -1758,7 +6452,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**keyword_instructions:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -1766,7 +6460,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**documents:** `typing.Optional[typing.Sequence[str]]` +**quality:** `typing.Optional[float]`
@@ -1774,7 +6468,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**max_references:** `typing.Optional[int]` +**max_tokens:** `typing.Optional[int]`
@@ -1782,7 +6476,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**max_context_words:** `typing.Optional[int]` +**sampling_temperature:** `typing.Optional[float]`
@@ -1790,7 +6484,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**scroll_jump:** `typing.Optional[int]` +**response_format_type:** `typing.Optional[BulkEvalPageRequestResponseFormatType]`
@@ -1798,7 +6492,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]` +**settings:** `typing.Optional[RunSettings]`
@@ -1806,36 +6500,56 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**dense_weight:** `typing.Optional[float]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
-Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - -
+
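+A minimal sketch using a placeholder spreadsheet URL; as noted above, the CSV or Google Sheet should include header names. `eval_prompts` and `agg_functions` would be supplied as lists of the item types documented above.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# Placeholder sheet URL; the linked data is expected to have a header row.
+client.evaluator.async_bulk_eval(
+    documents=["https://docs.google.com/spreadsheets/d/your-sheet-id"],
+)
+```
+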
+## SmartGpt +
client.smart_gpt.async_smart_gpt(...)
-**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]` - -
-
+#### 🔌 Usage
-**use_url_shortener:** `typing.Optional[bool]` - +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.smart_gpt.async_smart_gpt( + input_prompt="input_prompt", +) + +```
+
+
+ +#### ⚙️ Parameters
-**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. +
+
+ +**input_prompt:** `str`
@@ -1843,7 +6557,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. +**example_id:** `typing.Optional[str]`
@@ -1851,7 +6565,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]` +**functions:** `typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]]`
@@ -1859,7 +6573,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1867,11 +6581,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**input_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for User Langauge -> LLM Language (English) - +**cot_prompt:** `typing.Optional[str]`
@@ -1879,11 +6589,7 @@ Translation Glossary for User Langauge -> LLM Language (English)
-**output_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for LLM Language (English) -> User Langauge - +**reflexion_prompt:** `typing.Optional[str]`
@@ -1891,7 +6597,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]` +**dera_prompt:** `typing.Optional[str]`
@@ -1899,7 +6605,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). +**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]`
@@ -1947,7 +6653,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**response_format_type:** `typing.Optional[CreateStreamRequestResponseFormatType]` +**response_format_type:** `typing.Optional[SmartGptPageRequestResponseFormatType]`
@@ -1955,7 +6661,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]` +**settings:** `typing.Optional[RunSettings]`
@@ -1963,55 +6669,54 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**uberduck_voice_name:** `typing.Optional[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**uberduck_speaking_rate:** `typing.Optional[float]` -
+
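+A sketch with illustrative SmartGPT-style stage prompts; these strings are examples, not the API's defaults.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# The three stage prompts below are illustrative only.
+client.smart_gpt.async_smart_gpt(
+    input_prompt="What is the capital of Australia?",
+    cot_prompt="Let's work this out step by step.",
+    reflexion_prompt="Review the answer above and point out any mistakes.",
+    dera_prompt="Considering the critique, write the final improved answer.",
+)
+```
+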
+## Functions +
client.functions.async_functions(...)
-**google_voice_name:** `typing.Optional[str]` - -
-
+#### 🔌 Usage
-**google_speaking_rate:** `typing.Optional[float]` - -
-
-
-**google_pitch:** `typing.Optional[float]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.functions.async_functions() + +```
+ + + +#### ⚙️ Parameters
-**bark_history_prompt:** `typing.Optional[str]` - -
-
-
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead +**example_id:** `typing.Optional[str]`
@@ -2019,7 +6724,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_api_key:** `typing.Optional[str]` +**code:** `typing.Optional[str]` — The JS code to be executed.
@@ -2027,7 +6732,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_voice_id:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used in the code
@@ -2035,7 +6740,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_model:** `typing.Optional[str]` +**settings:** `typing.Optional[RunSettings]`
@@ -2043,39 +6748,54 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_stability:** `typing.Optional[float]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**elevenlabs_similarity_boost:** `typing.Optional[float]` -
+
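+A sketch of running a small JS snippet with injected variables. How the variables are exposed inside the code (assumed here to be via a `variables` object) is an assumption, not something stated in this reference.
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+    api_key="YOUR_API_KEY",
+)
+# Hypothetical JS snippet; check the Functions workflow docs for the actual execution environment.
+client.functions.async_functions(
+    code="return variables.a + variables.b;",
+    variables={"a": 1, "b": 2},
+)
+```
+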
+## LipSyncing +
client.lip_syncing.async_lipsync(...)
-**elevenlabs_style:** `typing.Optional[float]` - -
-
+#### 🔌 Usage
-**elevenlabs_speaker_boost:** `typing.Optional[bool]` - +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.lip_syncing.async_lipsync() + +``` +
+
+#### ⚙️ Parameters +
-**azure_voice_name:** `typing.Optional[str]` +
+
+ +**example_id:** `typing.Optional[str]`
@@ -2083,7 +6803,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]` +**functions:** `typing.Optional[typing.List[AsyncLipsyncRequestFunctionsItem]]`
@@ -2091,7 +6811,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2099,7 +6819,9 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**input_face:** `typing.Optional[str]` +**input_face:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
@@ -2139,15 +6861,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**input_text:** `typing.Optional[str]` — Use `input_prompt` instead +**sadtalker_settings:** `typing.Optional[AsyncLipsyncRequestSadtalkerSettings]`
@@ -2155,55 +6869,25 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**selected_model:** `typing.Optional[AsyncLipsyncRequestSelectedModel]`
-
-
- - - - -
- -
client.copilot_integrations.video_bots_stream(...) -
-
- -#### 🔌 Usage - -
-
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.copilot_integrations.video_bots_stream( - request_id="request_id", -) +**input_audio:** `from __future__ import annotations -``` -
-
+typing.Optional[core.File]` — See core.File for more documentation +
-#### ⚙️ Parameters -
-
-
- -**request_id:** `str` +**settings:** `typing.Optional[RunSettings]`
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py index 7ddc524..96c44d8 100644 --- a/src/gooey/__init__.py +++ b/src/gooey/__init__.py @@ -21,15 +21,9 @@ BalanceResponse, BotBroadcastFilters, BulkEvalPageOutput, - BulkEvalPageRequest, - BulkEvalPageRequestAggFunctionsItem, - BulkEvalPageRequestAggFunctionsItemFunction, - BulkEvalPageRequestEvalPromptsItem, - BulkEvalPageRequestFunctionsItem, - BulkEvalPageRequestFunctionsItemTrigger, - BulkEvalPageRequestResponseFormatType, - BulkEvalPageRequestSelectedModel, BulkEvalPageStatusResponse, + BulkRunRequestFunctionsItem, + BulkRunRequestFunctionsItemTrigger, BulkRunnerPageOutput, BulkRunnerPageRequest, BulkRunnerPageRequestFunctionsItem, @@ -46,14 +40,12 @@ ChyronPlantPageRequestFunctionsItemTrigger, ChyronPlantPageStatusResponse, CompareLlmPageOutput, - CompareLlmPageRequest, CompareLlmPageRequestFunctionsItem, CompareLlmPageRequestFunctionsItemTrigger, CompareLlmPageRequestResponseFormatType, CompareLlmPageRequestSelectedModelsItem, CompareLlmPageStatusResponse, CompareText2ImgPageOutput, - CompareText2ImgPageRequest, CompareText2ImgPageRequestFunctionsItem, CompareText2ImgPageRequestFunctionsItemTrigger, CompareText2ImgPageRequestScheduler, @@ -76,7 +68,6 @@ ConversationStart, CreateStreamResponse, DeforumSdPageOutput, - DeforumSdPageRequest, DeforumSdPageRequestAnimationPromptsItem, DeforumSdPageRequestFunctionsItem, DeforumSdPageRequestFunctionsItemTrigger, @@ -91,7 +82,6 @@ DocExtractPageRequestSelectedModel, DocExtractPageStatusResponse, DocSearchPageOutput, - DocSearchPageRequest, DocSearchPageRequestCitationStyle, DocSearchPageRequestEmbeddingModel, DocSearchPageRequestFunctionsItem, @@ -108,14 +98,17 @@ DocSummaryPageRequestSelectedAsrModel, DocSummaryPageRequestSelectedModel, DocSummaryPageStatusResponse, + DocSummaryRequestFunctionsItem, + DocSummaryRequestFunctionsItemTrigger, + DocSummaryRequestResponseFormatType, + DocSummaryRequestSelectedAsrModel, + DocSummaryRequestSelectedModel, EmailFaceInpaintingPageOutput, - EmailFaceInpaintingPageRequest, EmailFaceInpaintingPageRequestFunctionsItem, EmailFaceInpaintingPageRequestFunctionsItemTrigger, EmailFaceInpaintingPageRequestSelectedModel, EmailFaceInpaintingPageStatusResponse, EmbeddingsPageOutput, - EmbeddingsPageRequest, EmbeddingsPageRequestFunctionsItem, EmbeddingsPageRequestFunctionsItemTrigger, EmbeddingsPageRequestSelectedModel, @@ -129,12 +122,10 @@ FaceInpaintingPageStatusResponse, FinalResponse, FunctionsPageOutput, - FunctionsPageRequest, FunctionsPageStatusResponse, GenericErrorResponse, GenericErrorResponseDetail, GoogleGptPageOutput, - GoogleGptPageRequest, GoogleGptPageRequestEmbeddingModel, GoogleGptPageRequestFunctionsItem, GoogleGptPageRequestFunctionsItemTrigger, @@ -142,7 +133,6 @@ GoogleGptPageRequestSelectedModel, GoogleGptPageStatusResponse, GoogleImageGenPageOutput, - GoogleImageGenPageRequest, GoogleImageGenPageRequestFunctionsItem, GoogleImageGenPageRequestFunctionsItemTrigger, GoogleImageGenPageRequestSelectedModel, @@ -189,6 +179,14 @@ LipsyncTtsPageRequestSelectedModel, LipsyncTtsPageRequestTtsProvider, LipsyncTtsPageStatusResponse, + LipsyncTtsRequestFunctionsItem, + LipsyncTtsRequestFunctionsItemTrigger, + LipsyncTtsRequestOpenaiTtsModel, + LipsyncTtsRequestOpenaiVoiceName, + LipsyncTtsRequestSadtalkerSettings, + LipsyncTtsRequestSadtalkerSettingsPreprocess, + LipsyncTtsRequestSelectedModel, + LipsyncTtsRequestTtsProvider, LlmTools, MessagePart, ObjectInpaintingPageOutput, @@ -197,6 +195,12 @@ ObjectInpaintingPageRequestFunctionsItemTrigger, 
ObjectInpaintingPageRequestSelectedModel, ObjectInpaintingPageStatusResponse, + PortraitRequestFunctionsItem, + PortraitRequestFunctionsItemTrigger, + PortraitRequestSelectedModel, + ProductImageRequestFunctionsItem, + ProductImageRequestFunctionsItemTrigger, + ProductImageRequestSelectedModel, PromptTreeNode, PromptTreeNodePrompt, QrCodeGeneratorPageOutput, @@ -209,13 +213,19 @@ QrCodeGeneratorPageRequestSelectedControlnetModelItem, QrCodeGeneratorPageRequestSelectedModel, QrCodeGeneratorPageStatusResponse, + QrCodeRequestFunctionsItem, + QrCodeRequestFunctionsItemTrigger, + QrCodeRequestImagePromptControlnetModelsItem, + QrCodeRequestQrCodeVcard, + QrCodeRequestScheduler, + QrCodeRequestSelectedControlnetModelItem, + QrCodeRequestSelectedModel, RecipeFunction, RecipeFunctionTrigger, RecipeRunState, RelatedDocSearchResponse, RelatedGoogleGptResponse, RelatedQnADocPageOutput, - RelatedQnADocPageRequest, RelatedQnADocPageRequestCitationStyle, RelatedQnADocPageRequestEmbeddingModel, RelatedQnADocPageRequestFunctionsItem, @@ -225,13 +235,20 @@ RelatedQnADocPageRequestSelectedModel, RelatedQnADocPageStatusResponse, RelatedQnAPageOutput, - RelatedQnAPageRequest, RelatedQnAPageRequestEmbeddingModel, RelatedQnAPageRequestFunctionsItem, RelatedQnAPageRequestFunctionsItemTrigger, RelatedQnAPageRequestResponseFormatType, RelatedQnAPageRequestSelectedModel, RelatedQnAPageStatusResponse, + RemixImageRequestFunctionsItem, + RemixImageRequestFunctionsItemTrigger, + RemixImageRequestSelectedControlnetModel, + RemixImageRequestSelectedControlnetModelItem, + RemixImageRequestSelectedModel, + RemoveBackgroundRequestFunctionsItem, + RemoveBackgroundRequestFunctionsItemTrigger, + RemoveBackgroundRequestSelectedModel, ReplyButton, ResponseModel, ResponseModelFinalKeywordQuery, @@ -243,34 +260,35 @@ SadTalkerSettingsPreprocess, SearchReference, SeoSummaryPageOutput, - SeoSummaryPageRequest, SeoSummaryPageRequestResponseFormatType, SeoSummaryPageRequestSelectedModel, SeoSummaryPageStatusResponse, SerpSearchLocation, SerpSearchType, SmartGptPageOutput, - SmartGptPageRequest, - SmartGptPageRequestFunctionsItem, - SmartGptPageRequestFunctionsItemTrigger, - SmartGptPageRequestResponseFormatType, - SmartGptPageRequestSelectedModel, SmartGptPageStatusResponse, SocialLookupEmailPageOutput, - SocialLookupEmailPageRequest, SocialLookupEmailPageRequestFunctionsItem, SocialLookupEmailPageRequestFunctionsItemTrigger, SocialLookupEmailPageRequestResponseFormatType, SocialLookupEmailPageRequestSelectedModel, SocialLookupEmailPageStatusResponse, + SpeechRecognitionRequestFunctionsItem, + SpeechRecognitionRequestFunctionsItemTrigger, + SpeechRecognitionRequestOutputFormat, + SpeechRecognitionRequestSelectedModel, + SpeechRecognitionRequestTranslationModel, StreamError, + SynthesizeDataRequestFunctionsItem, + SynthesizeDataRequestFunctionsItemTrigger, + SynthesizeDataRequestResponseFormatType, + SynthesizeDataRequestSelectedAsrModel, + SynthesizeDataRequestSelectedModel, Text2AudioPageOutput, - Text2AudioPageRequest, Text2AudioPageRequestFunctionsItem, Text2AudioPageRequestFunctionsItemTrigger, Text2AudioPageStatusResponse, TextToSpeechPageOutput, - TextToSpeechPageRequest, TextToSpeechPageRequestFunctionsItem, TextToSpeechPageRequestFunctionsItemTrigger, TextToSpeechPageRequestOpenaiTtsModel, @@ -278,12 +296,18 @@ TextToSpeechPageRequestTtsProvider, TextToSpeechPageStatusResponse, TrainingDataModel, + TranslateRequestFunctionsItem, + TranslateRequestFunctionsItemTrigger, + TranslateRequestSelectedModel, TranslationPageOutput, 
TranslationPageRequest, TranslationPageRequestFunctionsItem, TranslationPageRequestFunctionsItemTrigger, TranslationPageRequestSelectedModel, TranslationPageStatusResponse, + UpscaleRequestFunctionsItem, + UpscaleRequestFunctionsItemTrigger, + UpscaleRequestSelectedModelsItem, ValidationError, ValidationErrorLocItem, Vcard, @@ -313,9 +337,31 @@ VideoBotsPageRequestTtsProvider, VideoBotsPageStatusResponse, ) -from .errors import PaymentRequiredError, UnprocessableEntityError -from . import copilot_integrations, misc +from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError +from . import copilot_for_your_enterprise, copilot_integrations, evaluator, functions, lip_syncing, misc, smart_gpt from .client import AsyncGooey, Gooey +from .copilot_for_your_enterprise import ( + AsyncVideoBotsRequestAsrModel, + AsyncVideoBotsRequestCitationStyle, + AsyncVideoBotsRequestEmbeddingModel, + AsyncVideoBotsRequestFunctionsItem, + AsyncVideoBotsRequestFunctionsItemTrigger, + AsyncVideoBotsRequestLipsyncModel, + AsyncVideoBotsRequestMessagesItem, + AsyncVideoBotsRequestMessagesItemContent, + AsyncVideoBotsRequestMessagesItemContentItem, + AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl, + AsyncVideoBotsRequestMessagesItemContentItem_Text, + AsyncVideoBotsRequestMessagesItemRole, + AsyncVideoBotsRequestOpenaiTtsModel, + AsyncVideoBotsRequestOpenaiVoiceName, + AsyncVideoBotsRequestResponseFormatType, + AsyncVideoBotsRequestSadtalkerSettings, + AsyncVideoBotsRequestSadtalkerSettingsPreprocess, + AsyncVideoBotsRequestSelectedModel, + AsyncVideoBotsRequestTranslationModel, + AsyncVideoBotsRequestTtsProvider, +) from .copilot_integrations import ( CreateStreamRequestAsrModel, CreateStreamRequestCitationStyle, @@ -330,6 +376,28 @@ VideoBotsStreamResponse, ) from .environment import GooeyEnvironment +from .evaluator import ( + BulkEvalPageRequestAggFunctionsItem, + BulkEvalPageRequestAggFunctionsItemFunction, + BulkEvalPageRequestEvalPromptsItem, + BulkEvalPageRequestFunctionsItem, + BulkEvalPageRequestFunctionsItemTrigger, + BulkEvalPageRequestResponseFormatType, + BulkEvalPageRequestSelectedModel, +) +from .lip_syncing import ( + AsyncLipsyncRequestFunctionsItem, + AsyncLipsyncRequestFunctionsItemTrigger, + AsyncLipsyncRequestSadtalkerSettings, + AsyncLipsyncRequestSadtalkerSettingsPreprocess, + AsyncLipsyncRequestSelectedModel, +) +from .smart_gpt import ( + SmartGptPageRequestFunctionsItem, + SmartGptPageRequestFunctionsItemTrigger, + SmartGptPageRequestResponseFormatType, + SmartGptPageRequestSelectedModel, +) from .version import __version__ __all__ = [ @@ -351,10 +419,34 @@ "AsrPageStatusResponse", "AsyncApiResponseModelV3", "AsyncGooey", + "AsyncLipsyncRequestFunctionsItem", + "AsyncLipsyncRequestFunctionsItemTrigger", + "AsyncLipsyncRequestSadtalkerSettings", + "AsyncLipsyncRequestSadtalkerSettingsPreprocess", + "AsyncLipsyncRequestSelectedModel", + "AsyncVideoBotsRequestAsrModel", + "AsyncVideoBotsRequestCitationStyle", + "AsyncVideoBotsRequestEmbeddingModel", + "AsyncVideoBotsRequestFunctionsItem", + "AsyncVideoBotsRequestFunctionsItemTrigger", + "AsyncVideoBotsRequestLipsyncModel", + "AsyncVideoBotsRequestMessagesItem", + "AsyncVideoBotsRequestMessagesItemContent", + "AsyncVideoBotsRequestMessagesItemContentItem", + "AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl", + "AsyncVideoBotsRequestMessagesItemContentItem_Text", + "AsyncVideoBotsRequestMessagesItemRole", + "AsyncVideoBotsRequestOpenaiTtsModel", + "AsyncVideoBotsRequestOpenaiVoiceName", + 
"AsyncVideoBotsRequestResponseFormatType", + "AsyncVideoBotsRequestSadtalkerSettings", + "AsyncVideoBotsRequestSadtalkerSettingsPreprocess", + "AsyncVideoBotsRequestSelectedModel", + "AsyncVideoBotsRequestTranslationModel", + "AsyncVideoBotsRequestTtsProvider", "BalanceResponse", "BotBroadcastFilters", "BulkEvalPageOutput", - "BulkEvalPageRequest", "BulkEvalPageRequestAggFunctionsItem", "BulkEvalPageRequestAggFunctionsItemFunction", "BulkEvalPageRequestEvalPromptsItem", @@ -363,6 +455,8 @@ "BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel", "BulkEvalPageStatusResponse", + "BulkRunRequestFunctionsItem", + "BulkRunRequestFunctionsItemTrigger", "BulkRunnerPageOutput", "BulkRunnerPageRequest", "BulkRunnerPageRequestFunctionsItem", @@ -379,14 +473,12 @@ "ChyronPlantPageRequestFunctionsItemTrigger", "ChyronPlantPageStatusResponse", "CompareLlmPageOutput", - "CompareLlmPageRequest", "CompareLlmPageRequestFunctionsItem", "CompareLlmPageRequestFunctionsItemTrigger", "CompareLlmPageRequestResponseFormatType", "CompareLlmPageRequestSelectedModelsItem", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", - "CompareText2ImgPageRequest", "CompareText2ImgPageRequestFunctionsItem", "CompareText2ImgPageRequestFunctionsItemTrigger", "CompareText2ImgPageRequestScheduler", @@ -419,7 +511,6 @@ "CreateStreamRequestTtsProvider", "CreateStreamResponse", "DeforumSdPageOutput", - "DeforumSdPageRequest", "DeforumSdPageRequestAnimationPromptsItem", "DeforumSdPageRequestFunctionsItem", "DeforumSdPageRequestFunctionsItemTrigger", @@ -434,7 +525,6 @@ "DocExtractPageRequestSelectedModel", "DocExtractPageStatusResponse", "DocSearchPageOutput", - "DocSearchPageRequest", "DocSearchPageRequestCitationStyle", "DocSearchPageRequestEmbeddingModel", "DocSearchPageRequestFunctionsItem", @@ -451,14 +541,17 @@ "DocSummaryPageRequestSelectedAsrModel", "DocSummaryPageRequestSelectedModel", "DocSummaryPageStatusResponse", + "DocSummaryRequestFunctionsItem", + "DocSummaryRequestFunctionsItemTrigger", + "DocSummaryRequestResponseFormatType", + "DocSummaryRequestSelectedAsrModel", + "DocSummaryRequestSelectedModel", "EmailFaceInpaintingPageOutput", - "EmailFaceInpaintingPageRequest", "EmailFaceInpaintingPageRequestFunctionsItem", "EmailFaceInpaintingPageRequestFunctionsItemTrigger", "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageStatusResponse", "EmbeddingsPageOutput", - "EmbeddingsPageRequest", "EmbeddingsPageRequestFunctionsItem", "EmbeddingsPageRequestFunctionsItemTrigger", "EmbeddingsPageRequestSelectedModel", @@ -472,14 +565,12 @@ "FaceInpaintingPageStatusResponse", "FinalResponse", "FunctionsPageOutput", - "FunctionsPageRequest", "FunctionsPageStatusResponse", "GenericErrorResponse", "GenericErrorResponseDetail", "Gooey", "GooeyEnvironment", "GoogleGptPageOutput", - "GoogleGptPageRequest", "GoogleGptPageRequestEmbeddingModel", "GoogleGptPageRequestFunctionsItem", "GoogleGptPageRequestFunctionsItemTrigger", @@ -487,7 +578,6 @@ "GoogleGptPageRequestSelectedModel", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", - "GoogleImageGenPageRequest", "GoogleImageGenPageRequestFunctionsItem", "GoogleImageGenPageRequestFunctionsItemTrigger", "GoogleImageGenPageRequestSelectedModel", @@ -534,6 +624,14 @@ "LipsyncTtsPageRequestSelectedModel", "LipsyncTtsPageRequestTtsProvider", "LipsyncTtsPageStatusResponse", + "LipsyncTtsRequestFunctionsItem", + "LipsyncTtsRequestFunctionsItemTrigger", + "LipsyncTtsRequestOpenaiTtsModel", + "LipsyncTtsRequestOpenaiVoiceName", + 
"LipsyncTtsRequestSadtalkerSettings", + "LipsyncTtsRequestSadtalkerSettingsPreprocess", + "LipsyncTtsRequestSelectedModel", + "LipsyncTtsRequestTtsProvider", "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", @@ -543,6 +641,12 @@ "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", "PaymentRequiredError", + "PortraitRequestFunctionsItem", + "PortraitRequestFunctionsItemTrigger", + "PortraitRequestSelectedModel", + "ProductImageRequestFunctionsItem", + "ProductImageRequestFunctionsItemTrigger", + "ProductImageRequestSelectedModel", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", @@ -555,13 +659,19 @@ "QrCodeGeneratorPageRequestSelectedControlnetModelItem", "QrCodeGeneratorPageRequestSelectedModel", "QrCodeGeneratorPageStatusResponse", + "QrCodeRequestFunctionsItem", + "QrCodeRequestFunctionsItemTrigger", + "QrCodeRequestImagePromptControlnetModelsItem", + "QrCodeRequestQrCodeVcard", + "QrCodeRequestScheduler", + "QrCodeRequestSelectedControlnetModelItem", + "QrCodeRequestSelectedModel", "RecipeFunction", "RecipeFunctionTrigger", "RecipeRunState", "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", - "RelatedQnADocPageRequest", "RelatedQnADocPageRequestCitationStyle", "RelatedQnADocPageRequestEmbeddingModel", "RelatedQnADocPageRequestFunctionsItem", @@ -571,13 +681,20 @@ "RelatedQnADocPageRequestSelectedModel", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", - "RelatedQnAPageRequest", "RelatedQnAPageRequestEmbeddingModel", "RelatedQnAPageRequestFunctionsItem", "RelatedQnAPageRequestFunctionsItemTrigger", "RelatedQnAPageRequestResponseFormatType", "RelatedQnAPageRequestSelectedModel", "RelatedQnAPageStatusResponse", + "RemixImageRequestFunctionsItem", + "RemixImageRequestFunctionsItemTrigger", + "RemixImageRequestSelectedControlnetModel", + "RemixImageRequestSelectedControlnetModelItem", + "RemixImageRequestSelectedModel", + "RemoveBackgroundRequestFunctionsItem", + "RemoveBackgroundRequestFunctionsItemTrigger", + "RemoveBackgroundRequestSelectedModel", "ReplyButton", "ResponseModel", "ResponseModelFinalKeywordQuery", @@ -589,41 +706,50 @@ "SadTalkerSettingsPreprocess", "SearchReference", "SeoSummaryPageOutput", - "SeoSummaryPageRequest", "SeoSummaryPageRequestResponseFormatType", "SeoSummaryPageRequestSelectedModel", "SeoSummaryPageStatusResponse", "SerpSearchLocation", "SerpSearchType", "SmartGptPageOutput", - "SmartGptPageRequest", "SmartGptPageRequestFunctionsItem", "SmartGptPageRequestFunctionsItemTrigger", "SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", - "SocialLookupEmailPageRequest", "SocialLookupEmailPageRequestFunctionsItem", "SocialLookupEmailPageRequestFunctionsItemTrigger", "SocialLookupEmailPageRequestResponseFormatType", "SocialLookupEmailPageRequestSelectedModel", "SocialLookupEmailPageStatusResponse", + "SpeechRecognitionRequestFunctionsItem", + "SpeechRecognitionRequestFunctionsItemTrigger", + "SpeechRecognitionRequestOutputFormat", + "SpeechRecognitionRequestSelectedModel", + "SpeechRecognitionRequestTranslationModel", "StreamError", + "SynthesizeDataRequestFunctionsItem", + "SynthesizeDataRequestFunctionsItemTrigger", + "SynthesizeDataRequestResponseFormatType", + "SynthesizeDataRequestSelectedAsrModel", + "SynthesizeDataRequestSelectedModel", "Text2AudioPageOutput", - "Text2AudioPageRequest", "Text2AudioPageRequestFunctionsItem", "Text2AudioPageRequestFunctionsItemTrigger", 
"Text2AudioPageStatusResponse", "TextToSpeechPageOutput", - "TextToSpeechPageRequest", "TextToSpeechPageRequestFunctionsItem", "TextToSpeechPageRequestFunctionsItemTrigger", "TextToSpeechPageRequestOpenaiTtsModel", "TextToSpeechPageRequestOpenaiVoiceName", "TextToSpeechPageRequestTtsProvider", "TextToSpeechPageStatusResponse", + "TooManyRequestsError", "TrainingDataModel", + "TranslateRequestFunctionsItem", + "TranslateRequestFunctionsItemTrigger", + "TranslateRequestSelectedModel", "TranslationPageOutput", "TranslationPageRequest", "TranslationPageRequestFunctionsItem", @@ -631,6 +757,9 @@ "TranslationPageRequestSelectedModel", "TranslationPageStatusResponse", "UnprocessableEntityError", + "UpscaleRequestFunctionsItem", + "UpscaleRequestFunctionsItemTrigger", + "UpscaleRequestSelectedModelsItem", "ValidationError", "ValidationErrorLocItem", "Vcard", @@ -661,6 +790,11 @@ "VideoBotsPageStatusResponse", "VideoBotsStreamResponse", "__version__", + "copilot_for_your_enterprise", "copilot_integrations", + "evaluator", + "functions", + "lip_syncing", "misc", + "smart_gpt", ] diff --git a/src/gooey/client.py b/src/gooey/client.py index c6ceaa1..a727019 100644 --- a/src/gooey/client.py +++ b/src/gooey/client.py @@ -7,47 +7,145 @@ from .core.api_error import ApiError from .core.client_wrapper import SyncClientWrapper from .copilot_integrations.client import CopilotIntegrationsClient +from .copilot_for_your_enterprise.client import CopilotForYourEnterpriseClient +from .evaluator.client import EvaluatorClient +from .smart_gpt.client import SmartGptClient +from .functions.client import FunctionsClient +from .lip_syncing.client import LipSyncingClient from .misc.client import MiscClient +from .types.deforum_sd_page_request_animation_prompts_item import DeforumSdPageRequestAnimationPromptsItem +from .types.deforum_sd_page_request_functions_item import DeforumSdPageRequestFunctionsItem +from .types.deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel +from .types.run_settings import RunSettings from .core.request_options import RequestOptions -from .types.video_bots_page_status_response import VideoBotsPageStatusResponse +from .types.deforum_sd_page_output import DeforumSdPageOutput from .core.pydantic_utilities import parse_obj_as +from .errors.payment_required_error import PaymentRequiredError +from .errors.unprocessable_entity_error import UnprocessableEntityError +from .types.http_validation_error import HttpValidationError +from .errors.too_many_requests_error import TooManyRequestsError +from .types.generic_error_response import GenericErrorResponse from json.decoder import JSONDecodeError -from .types.deforum_sd_page_status_response import DeforumSdPageStatusResponse -from .types.qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse -from .types.related_qn_a_page_status_response import RelatedQnAPageStatusResponse -from .types.seo_summary_page_status_response import SeoSummaryPageStatusResponse -from .types.google_gpt_page_status_response import GoogleGptPageStatusResponse -from .types.social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse -from .types.bulk_runner_page_status_response import BulkRunnerPageStatusResponse -from .types.bulk_eval_page_status_response import BulkEvalPageStatusResponse -from .types.doc_extract_page_status_response import DocExtractPageStatusResponse -from .types.compare_llm_page_status_response import CompareLlmPageStatusResponse -from .types.doc_search_page_status_response import 
DocSearchPageStatusResponse -from .types.smart_gpt_page_status_response import SmartGptPageStatusResponse -from .types.doc_summary_page_status_response import DocSummaryPageStatusResponse -from .types.functions_page_status_response import FunctionsPageStatusResponse -from .types.lipsync_page_status_response import LipsyncPageStatusResponse -from .types.lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse -from .types.text_to_speech_page_status_response import TextToSpeechPageStatusResponse -from .types.asr_page_status_response import AsrPageStatusResponse -from .types.text2audio_page_status_response import Text2AudioPageStatusResponse -from .types.translation_page_status_response import TranslationPageStatusResponse -from .types.img2img_page_status_response import Img2ImgPageStatusResponse -from .types.compare_text2img_page_status_response import CompareText2ImgPageStatusResponse -from .types.object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse -from .types.face_inpainting_page_status_response import FaceInpaintingPageStatusResponse -from .types.email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse -from .types.google_image_gen_page_status_response import GoogleImageGenPageStatusResponse -from .types.image_segmentation_page_status_response import ImageSegmentationPageStatusResponse -from .types.compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse -from .types.chyron_plant_page_status_response import ChyronPlantPageStatusResponse -from .types.letter_writer_page_status_response import LetterWriterPageStatusResponse -from .types.embeddings_page_status_response import EmbeddingsPageStatusResponse -from .types.related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse +from .types.qr_code_request_functions_item import QrCodeRequestFunctionsItem +from . 
import core +from .types.qr_code_request_qr_code_vcard import QrCodeRequestQrCodeVcard +from .types.qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem +from .types.qr_code_request_selected_model import QrCodeRequestSelectedModel +from .types.qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem +from .types.qr_code_request_scheduler import QrCodeRequestScheduler +from .types.qr_code_generator_page_output import QrCodeGeneratorPageOutput +from .types.related_qn_a_page_request_functions_item import RelatedQnAPageRequestFunctionsItem +from .types.related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel +from .types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel +from .types.related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType +from .types.serp_search_location import SerpSearchLocation +from .types.serp_search_type import SerpSearchType +from .types.related_qn_a_page_output import RelatedQnAPageOutput +from .types.seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel +from .types.seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType +from .types.seo_summary_page_output import SeoSummaryPageOutput +from .types.google_gpt_page_request_functions_item import GoogleGptPageRequestFunctionsItem +from .types.google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel +from .types.google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel +from .types.google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType +from .types.google_gpt_page_output import GoogleGptPageOutput +from .types.social_lookup_email_page_request_functions_item import SocialLookupEmailPageRequestFunctionsItem +from .types.social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel +from .types.social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType +from .types.social_lookup_email_page_output import SocialLookupEmailPageOutput +from .types.bulk_run_request_functions_item import BulkRunRequestFunctionsItem +from .types.bulk_runner_page_output import BulkRunnerPageOutput +from .types.synthesize_data_request_functions_item import SynthesizeDataRequestFunctionsItem +from .types.synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel +from .types.synthesize_data_request_selected_model import SynthesizeDataRequestSelectedModel +from .types.synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType +from .types.doc_extract_page_output import DocExtractPageOutput +from .types.compare_llm_page_request_functions_item import CompareLlmPageRequestFunctionsItem +from .types.compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem +from .types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType +from .types.compare_llm_page_output import CompareLlmPageOutput +from .types.doc_search_page_request_functions_item import DocSearchPageRequestFunctionsItem +from .types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery +from .types.doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel +from .types.doc_search_page_request_selected_model 
import DocSearchPageRequestSelectedModel +from .types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle +from .types.doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType +from .types.doc_search_page_output import DocSearchPageOutput +from .types.doc_summary_request_functions_item import DocSummaryRequestFunctionsItem +from .types.doc_summary_request_selected_model import DocSummaryRequestSelectedModel +from .types.doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel +from .types.doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType +from .types.doc_summary_page_output import DocSummaryPageOutput +from .types.lipsync_tts_request_functions_item import LipsyncTtsRequestFunctionsItem +from .types.lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider +from .types.lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName +from .types.lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel +from .types.lipsync_tts_request_sadtalker_settings import LipsyncTtsRequestSadtalkerSettings +from .types.lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel +from .types.lipsync_tts_page_output import LipsyncTtsPageOutput +from .types.text_to_speech_page_request_functions_item import TextToSpeechPageRequestFunctionsItem +from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider +from .types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName +from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel +from .types.text_to_speech_page_output import TextToSpeechPageOutput +from .types.speech_recognition_request_functions_item import SpeechRecognitionRequestFunctionsItem +from .types.speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel +from .types.speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel +from .types.speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat +from .types.asr_page_output import AsrPageOutput +from .types.text2audio_page_request_functions_item import Text2AudioPageRequestFunctionsItem +from .types.text2audio_page_output import Text2AudioPageOutput +from .types.translate_request_functions_item import TranslateRequestFunctionsItem +from .types.translate_request_selected_model import TranslateRequestSelectedModel +from .types.translation_page_output import TranslationPageOutput +from .types.remix_image_request_functions_item import RemixImageRequestFunctionsItem +from .types.remix_image_request_selected_model import RemixImageRequestSelectedModel +from .types.remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel +from .types.img2img_page_output import Img2ImgPageOutput +from .types.compare_text2img_page_request_functions_item import CompareText2ImgPageRequestFunctionsItem +from .types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem +from .types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler +from .types.compare_text2img_page_output import CompareText2ImgPageOutput +from .types.product_image_request_functions_item import ProductImageRequestFunctionsItem +from .types.product_image_request_selected_model import ProductImageRequestSelectedModel +from 
.types.object_inpainting_page_output import ObjectInpaintingPageOutput +from .types.portrait_request_functions_item import PortraitRequestFunctionsItem +from .types.portrait_request_selected_model import PortraitRequestSelectedModel +from .types.face_inpainting_page_output import FaceInpaintingPageOutput +from .types.email_face_inpainting_page_request_functions_item import EmailFaceInpaintingPageRequestFunctionsItem +from .types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel +from .types.email_face_inpainting_page_output import EmailFaceInpaintingPageOutput +from .types.google_image_gen_page_request_functions_item import GoogleImageGenPageRequestFunctionsItem +from .types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel +from .types.google_image_gen_page_output import GoogleImageGenPageOutput +from .types.remove_background_request_functions_item import RemoveBackgroundRequestFunctionsItem +from .types.remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel +from .types.image_segmentation_page_output import ImageSegmentationPageOutput +from .types.upscale_request_functions_item import UpscaleRequestFunctionsItem +from .types.upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem +from .types.compare_upscaler_page_output import CompareUpscalerPageOutput +from .types.embeddings_page_request_functions_item import EmbeddingsPageRequestFunctionsItem +from .types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel +from .types.embeddings_page_output import EmbeddingsPageOutput +from .types.related_qn_a_doc_page_request_functions_item import RelatedQnADocPageRequestFunctionsItem +from .types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery +from .types.related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel +from .types.related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel +from .types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle +from .types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType +from .types.related_qn_a_doc_page_output import RelatedQnADocPageOutput from .core.client_wrapper import AsyncClientWrapper from .copilot_integrations.client import AsyncCopilotIntegrationsClient +from .copilot_for_your_enterprise.client import AsyncCopilotForYourEnterpriseClient +from .evaluator.client import AsyncEvaluatorClient +from .smart_gpt.client import AsyncSmartGptClient +from .functions.client import AsyncFunctionsClient +from .lip_syncing.client import AsyncLipSyncingClient from .misc.client import AsyncMiscClient +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
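The OMIT value defined just above is an Ellipsis-based sentinel used as the default for the optional keyword arguments on the new client methods below, so the generated request code can tell "argument not supplied" apart from an explicit None. The following is a minimal, self-contained sketch of that sentinel pattern only; the drop_omitted helper and the example fields are hypothetical illustrations for this note and are not part of the generated SDK.

import typing

OMIT = typing.cast(typing.Any, ...)  # sentinel meaning "caller did not pass this argument"

def drop_omitted(body: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:
    # Keep explicit None values (serialized as JSON null) but drop unset sentinels.
    return {key: value for key, value in body.items() if value is not OMIT}

def build_request(
    *,
    seed: typing.Optional[int] = OMIT,
    fps: typing.Optional[int] = OMIT,
) -> typing.Dict[str, typing.Any]:
    # Only fields the caller actually supplied end up in the request payload.
    return drop_omitted({"seed": seed, "fps": fps})

assert build_request(fps=12) == {"fps": 12}        # "seed" is omitted from the payload entirely
assert build_request(seed=None) == {"seed": None}  # an explicit None is preserved as null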
+ class Gooey: """ @@ -110,104 +208,273 @@ def __init__( timeout=_defaulted_timeout, ) self.copilot_integrations = CopilotIntegrationsClient(client_wrapper=self._client_wrapper) + self.copilot_for_your_enterprise = CopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) + self.evaluator = EvaluatorClient(client_wrapper=self._client_wrapper) + self.smart_gpt = SmartGptClient(client_wrapper=self._client_wrapper) + self.functions = FunctionsClient(client_wrapper=self._client_wrapper) + self.lip_syncing = LipSyncingClient(client_wrapper=self._client_wrapper) self.misc = MiscClient(client_wrapper=self._client_wrapper) - def post_v3video_bots_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> VideoBotsPageStatusResponse: + def animate( + self, + *, + animation_prompts: typing.Sequence[DeforumSdPageRequestAnimationPromptsItem], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[DeforumSdPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_frames: typing.Optional[int] = OMIT, + selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, + animation_mode: typing.Optional[str] = OMIT, + zoom: typing.Optional[str] = OMIT, + translation_x: typing.Optional[str] = OMIT, + translation_y: typing.Optional[str] = OMIT, + rotation3d_x: typing.Optional[str] = OMIT, + rotation3d_y: typing.Optional[str] = OMIT, + rotation3d_z: typing.Optional[str] = OMIT, + fps: typing.Optional[int] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DeforumSdPageOutput: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ animation_prompts : typing.Sequence[DeforumSdPageRequestAnimationPromptsItem] - Returns - ------- - VideoBotsPageStatusResponse - Successful Response + example_id : typing.Optional[str] - Examples - -------- - from gooey import Gooey + functions : typing.Optional[typing.Sequence[DeforumSdPageRequestFunctionsItem]] - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3video_bots_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/video-bots/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - VideoBotsPageStatusResponse, - parse_obj_as( - type_=VideoBotsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + max_frames : typing.Optional[int] + + selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] + + animation_mode : typing.Optional[str] + + zoom : typing.Optional[str] + + translation_x : typing.Optional[str] + + translation_y : typing.Optional[str] + + rotation3d_x : typing.Optional[str] + + rotation3d_y : typing.Optional[str] + + rotation3d_z : typing.Optional[str] + + fps : typing.Optional[int] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] - def post_v3deforum_sd_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> DeforumSdPageStatusResponse: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - DeforumSdPageStatusResponse + DeforumSdPageOutput Successful Response Examples -------- - from gooey import Gooey + from gooey import DeforumSdPageRequestAnimationPromptsItem, Gooey client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3deforum_sd_async() + client.animate( + animation_prompts=[ + DeforumSdPageRequestAnimationPromptsItem( + frame="frame", + prompt="prompt", + ) + ], + ) """ _response = self._client_wrapper.httpx_client.request( "v3/DeforumSD/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "animation_prompts": animation_prompts, + "max_frames": max_frames, + "selected_model": selected_model, + "animation_mode": animation_mode, + "zoom": zoom, + "translation_x": translation_x, + "translation_y": translation_y, + "rotation_3d_x": rotation3d_x, + "rotation_3d_y": rotation3d_y, + "rotation_3d_z": rotation3d_z, + "fps": fps, + "seed": seed, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - DeforumSdPageStatusResponse, + DeforumSdPageOutput, parse_obj_as( - type_=DeforumSdPageStatusResponse, # type: ignore + type_=DeforumSdPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3art_qr_code_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> QrCodeGeneratorPageStatusResponse: + def qr_code( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[QrCodeRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + qr_code_data: typing.Optional[str] = None, + qr_code_input_image: typing.Optional[core.File] = None, + qr_code_vcard: typing.Optional[QrCodeRequestQrCodeVcard] = None, + qr_code_file: typing.Optional[core.File] = None, + use_url_shortener: typing.Optional[bool] = None, + negative_prompt: typing.Optional[str] = None, + image_prompt: typing.Optional[str] = None, + image_prompt_controlnet_models: typing.Optional[ + typing.List[QrCodeRequestImagePromptControlnetModelsItem] + ] = None, + image_prompt_strength: typing.Optional[float] = None, + image_prompt_scale: typing.Optional[float] = None, + image_prompt_pos_x: typing.Optional[float] = None, + image_prompt_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[QrCodeRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + 
controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + scheduler: typing.Optional[QrCodeRequestScheduler] = None, + seed: typing.Optional[int] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> QrCodeGeneratorPageOutput: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[QrCodeRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + qr_code_data : typing.Optional[str] + + qr_code_input_image : typing.Optional[core.File] + See core.File for more documentation + + qr_code_vcard : typing.Optional[QrCodeRequestQrCodeVcard] + + qr_code_file : typing.Optional[core.File] + See core.File for more documentation + + use_url_shortener : typing.Optional[bool] + + negative_prompt : typing.Optional[str] + + image_prompt : typing.Optional[str] + + image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]] + + image_prompt_strength : typing.Optional[float] + + image_prompt_scale : typing.Optional[float] + + image_prompt_pos_x : typing.Optional[float] + + image_prompt_pos_y : typing.Optional[float] + + selected_model : typing.Optional[QrCodeRequestSelectedModel] + + selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.List[float]] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + scheduler : typing.Optional[QrCodeRequestScheduler] + + seed : typing.Optional[int] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - QrCodeGeneratorPageStatusResponse + QrCodeGeneratorPageOutput Successful Response Examples @@ -217,39 +484,192 @@ def post_v3art_qr_code_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3art_qr_code_async() + client.qr_code( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( "v3/art-qr-code/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "qr_code_data": qr_code_data, + "qr_code_vcard": qr_code_vcard, + "use_url_shortener": use_url_shortener, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "image_prompt": image_prompt, + "image_prompt_controlnet_models": image_prompt_controlnet_models, + "image_prompt_strength": image_prompt_strength, + "image_prompt_scale": image_prompt_scale, + "image_prompt_pos_x": image_prompt_pos_x, + "image_prompt_pos_y": image_prompt_pos_y, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "num_outputs": num_outputs, + "quality": quality, + "scheduler": scheduler, + "seed": seed, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "settings": settings, + }, + files={ + "qr_code_input_image": qr_code_input_image, + "qr_code_file": qr_code_file, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - QrCodeGeneratorPageStatusResponse, + QrCodeGeneratorPageOutput, parse_obj_as( - type_=QrCodeGeneratorPageStatusResponse, # type: ignore + type_=QrCodeGeneratorPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3related_qna_maker_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnAPageStatusResponse: + def seo_people_also_ask( + self, + *, + search_query: str, + site_filter: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RelatedQnAPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = 
OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> RelatedQnAPageOutput: """ Parameters ---------- + search_query : str + + site_filter : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RelatedQnAPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - RelatedQnAPageStatusResponse + RelatedQnAPageOutput Successful Response Examples @@ -259,39 +679,172 @@ def post_v3related_qna_maker_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3related_qna_maker_async() + client.seo_people_also_ask( + search_query="search_query", + site_filter="site_filter", + ) """ _response = self._client_wrapper.httpx_client.request( "v3/related-qna-maker/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - RelatedQnAPageStatusResponse, + RelatedQnAPageOutput, parse_obj_as( - type_=RelatedQnAPageStatusResponse, # type: ignore + type_=RelatedQnAPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3seo_summary_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> SeoSummaryPageStatusResponse: + def seo_content( + self, + *, + search_query: str, + keywords: str, + title: str, + company_url: str, + example_id: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = OMIT, + enable_html: typing.Optional[bool] = OMIT, + selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + enable_crosslinks: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: 
typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> SeoSummaryPageOutput: """ Parameters ---------- + search_query : str + + keywords : str + + title : str + + company_url : str + + example_id : typing.Optional[str] + + task_instructions : typing.Optional[str] + + enable_html : typing.Optional[bool] + + selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + enable_crosslinks : typing.Optional[bool] + + seed : typing.Optional[int] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - SeoSummaryPageStatusResponse + SeoSummaryPageOutput Successful Response Examples @@ -301,39 +854,185 @@ def post_v3seo_summary_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3seo_summary_async() + client.seo_content( + search_query="search_query", + keywords="keywords", + title="title", + company_url="company_url", + ) """ _response = self._client_wrapper.httpx_client.request( "v3/SEOSummary/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "search_query": search_query, + "keywords": keywords, + "title": title, + "company_url": company_url, + "task_instructions": task_instructions, + "enable_html": enable_html, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "enable_crosslinks": enable_crosslinks, + "seed": seed, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - SeoSummaryPageStatusResponse, + SeoSummaryPageOutput, parse_obj_as( - type_=SeoSummaryPageStatusResponse, # type: ignore + type_=SeoSummaryPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, 
+ parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3google_gpt_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleGptPageStatusResponse: + def web_search_llm( + self, + *, + search_query: str, + site_filter: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[GoogleGptPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GoogleGptPageOutput: """ Parameters ---------- + search_query : str + + site_filter : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[GoogleGptPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - GoogleGptPageStatusResponse + GoogleGptPageOutput Successful Response Examples @@ -343,39 +1042,144 @@ def post_v3google_gpt_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3google_gpt_async() + client.web_search_llm( + search_query="search_query", + site_filter="site_filter", + ) """ _response = self._client_wrapper.httpx_client.request( "v3/google-gpt/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GoogleGptPageStatusResponse, + GoogleGptPageOutput, parse_obj_as( - type_=GoogleGptPageStatusResponse, # type: ignore + type_=GoogleGptPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3social_lookup_email_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> SocialLookupEmailPageStatusResponse: + def personalize_email( + self, + *, + email_address: str, + example_id: typing.Optional[str] = None, + functions: 
typing.Optional[typing.Sequence[SocialLookupEmailPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> SocialLookupEmailPageOutput: """ Parameters ---------- + email_address : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[SocialLookupEmailPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - SocialLookupEmailPageStatusResponse + SocialLookupEmailPageOutput Successful Response Examples @@ -385,39 +1189,133 @@ def post_v3social_lookup_email_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3social_lookup_email_async() + client.personalize_email( + email_address="email_address", + ) """ _response = self._client_wrapper.httpx_client.request( "v3/SocialLookupEmail/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "input_prompt": input_prompt, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - SocialLookupEmailPageStatusResponse, + SocialLookupEmailPageOutput, parse_obj_as( - type_=SocialLookupEmailPageStatusResponse, # type: ignore + type_=SocialLookupEmailPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except 
JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3bulk_runner_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> BulkRunnerPageStatusResponse: + def bulk_run( + self, + *, + documents: typing.List[core.File], + run_urls: typing.List[str], + input_columns: typing.Dict[str, str], + output_columns: typing.Dict[str, str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[BulkRunRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + eval_urls: typing.Optional[typing.List[str]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> BulkRunnerPageOutput: """ Parameters ---------- + documents : typing.List[core.File] + See core.File for more documentation + + run_urls : typing.List[str] + + Provide one or more Gooey.AI workflow runs. + You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. + + + input_columns : typing.Dict[str, str] + + For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + + + output_columns : typing.Dict[str, str] + + For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. + + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[BulkRunRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + eval_urls : typing.Optional[typing.List[str]] + + _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - BulkRunnerPageStatusResponse + BulkRunnerPageOutput Successful Response Examples @@ -427,39 +1325,146 @@ def post_v3bulk_runner_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3bulk_runner_async() + client.bulk_run( + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, + ) """ _response = self._client_wrapper.httpx_client.request( "v3/bulk-runner/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "run_urls": run_urls, + "input_columns": input_columns, + "output_columns": output_columns, + "eval_urls": eval_urls, + "settings": settings, + }, + files={ + "documents": documents, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - BulkRunnerPageStatusResponse, + BulkRunnerPageOutput, parse_obj_as( - type_=BulkRunnerPageStatusResponse, # type: ignore + type_=BulkRunnerPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3bulk_eval_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> BulkEvalPageStatusResponse: + def synthesize_data( + self, + *, + documents: typing.List[core.File], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + sheet_url: typing.Optional[core.File] = None, + selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + task_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[SynthesizeDataRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocExtractPageOutput: """ Parameters ---------- + documents : typing.List[core.File] + See core.File for more documentation + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + sheet_url : 
typing.Optional[core.File] + See core.File for more documentation + + selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + glossary_document : typing.Optional[core.File] + See core.File for more documentation + + task_instructions : typing.Optional[str] + + selected_model : typing.Optional[SynthesizeDataRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - BulkEvalPageStatusResponse + DocExtractPageOutput Successful Response Examples @@ -469,39 +1474,132 @@ def post_v3bulk_eval_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3bulk_eval_async() + client.synthesize_data() """ _response = self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async", + "v3/doc-extract/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "task_instructions": task_instructions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={ + "documents": documents, + "sheet_url": sheet_url, + "glossary_document": glossary_document, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - BulkEvalPageStatusResponse, + DocExtractPageOutput, parse_obj_as( - type_=BulkEvalPageStatusResponse, # type: ignore + type_=DocExtractPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3doc_extract_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> DocExtractPageStatusResponse: + def llm( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[CompareLlmPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + 
num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareLlmPageOutput: """ Parameters ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[CompareLlmPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - DocExtractPageStatusResponse + CompareLlmPageOutput Successful Response Examples @@ -511,39 +1609,162 @@ def post_v3doc_extract_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3doc_extract_async() + client.llm() """ _response = self._client_wrapper.httpx_client.request( - "v3/doc-extract/async", + "v3/CompareLLM/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "selected_models": selected_models, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - DocExtractPageStatusResponse, + CompareLlmPageOutput, parse_obj_as( - type_=DocExtractPageStatusResponse, # type: ignore + type_=CompareLlmPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3compare_llm_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> CompareLlmPageStatusResponse: + def rag( + self, + *, + search_query: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[DocSearchPageRequestFunctionsItem]] = OMIT, + variables: 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocSearchPageOutput: """ Parameters ---------- + search_query : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[DocSearchPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] + + documents : typing.Optional[typing.Sequence[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSearchPageRequestSelectedModel] + + citation_style : typing.Optional[DocSearchPageRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
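A minimal usage sketch (not part of the generated diff) for the rag() wrapper documented above; the document URL and tuning values are placeholders, not SDK defaults:

from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)
# search_query is the only required argument; omitted optional arguments are left out of the request.
result = client.rag(
    search_query="How do I reset my password?",
    documents=["https://example.com/help-center.pdf"],  # hypothetical source document
    max_references=3,
    dense_weight=0.7,  # favour dense (semantic) matches over keyword matches
)
print(result)  # DocSearchPageOutput on success; non-2xx responses raise the typed errors shown below
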
Returns ------- - CompareLlmPageStatusResponse + DocSearchPageOutput Successful Response Examples @@ -553,39 +1774,154 @@ def post_v3compare_llm_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3compare_llm_async() + client.rag( + search_query="search_query", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async", + "v3/doc-search/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - CompareLlmPageStatusResponse, + DocSearchPageOutput, parse_obj_as( - type_=CompareLlmPageStatusResponse, # type: ignore + type_=DocSearchPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3doc_search_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> DocSearchPageStatusResponse: + def doc_summary( + self, + *, + documents: typing.List[core.File], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + task_instructions: typing.Optional[str] = None, + merge_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[DocSummaryRequestSelectedModel] = None, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, + selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> DocSummaryPageOutput: """ Parameters ---------- + documents : typing.List[core.File] + See core.File for more documentation + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + merge_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSummaryRequestSelectedModel] + + chain_type : typing.Optional[typing.Literal["map_reduce"]] + + selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - DocSearchPageStatusResponse + DocSummaryPageOutput Successful Response Examples @@ -595,39 +1931,188 @@ def post_v3doc_search_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3doc_search_async() + client.doc_summary() """ _response = self._client_wrapper.httpx_client.request( - "v3/doc-search/async", + "v3/doc-summary/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "task_instructions": task_instructions, + "merge_instructions": merge_instructions, + "selected_model": selected_model, + "chain_type": chain_type, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={ + "documents": documents, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - DocSearchPageStatusResponse, + DocSummaryPageOutput, parse_obj_as( - type_=DocSearchPageStatusResponse, # type: ignore + type_=DocSummaryPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3smart_gpt_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> SmartGptPageStatusResponse: + def lipsync_tts( + self, + *, + text_prompt: str, + 
example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None, + input_face: typing.Optional[core.File] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[LipsyncTtsRequestSadtalkerSettings] = None, + selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> LipsyncTtsPageOutput: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel] + + input_face : typing.Optional[core.File] + See core.File for more documentation + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[LipsyncTtsRequestSadtalkerSettings] + + selected_model : typing.Optional[LipsyncTtsRequestSelectedModel] + + settings : typing.Optional[RunSettings] + request_options : 
typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - SmartGptPageStatusResponse + LipsyncTtsPageOutput Successful Response Examples @@ -637,39 +2122,181 @@ def post_v3smart_gpt_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3smart_gpt_async() + client.lipsync_tts( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async", + "v3/LipsyncTTS/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, + "settings": settings, + }, + files={ + "input_face": input_face, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - SmartGptPageStatusResponse, + LipsyncTtsPageOutput, parse_obj_as( - type_=SmartGptPageStatusResponse, # type: ignore + type_=LipsyncTtsPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3doc_summary_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> DocSummaryPageStatusResponse: + def text_to_speech( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: 
typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> TextToSpeechPageOutput: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
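A brief sketch (not part of the diff) showing the text_to_speech() call added in this hunk with a couple of its optional voice parameters; the voice name and speaking rate are illustrative values only:

from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)
audio = client.text_to_speech(
    text_prompt="Welcome to Gooey!",
    google_voice_name="en-US-Neural2-F",  # assumed Google TTS voice id; swap in your own
    google_speaking_rate=1.0,
)
print(audio)  # TextToSpeechPageOutput on success
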
Returns ------- - DocSummaryPageStatusResponse + TextToSpeechPageOutput Successful Response Examples @@ -679,39 +2306,144 @@ def post_v3doc_summary_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3doc_summary_async() + client.text_to_speech( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/doc-summary/async", + "v3/TextToSpeech/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - DocSummaryPageStatusResponse, + TextToSpeechPageOutput, parse_obj_as( - type_=DocSummaryPageStatusResponse, # type: ignore + type_=TextToSpeechPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3functions_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> FunctionsPageStatusResponse: + def speech_recognition( + self, + *, + documents: typing.List[core.File], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None, + language: typing.Optional[str] = None, + translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None, + output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None, + google_translate_target: typing.Optional[str] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> AsrPageOutput: """ Parameters ---------- + documents : typing.List[core.File] + See core.File for more documentation + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel] + + language : typing.Optional[str] + + translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel] + + output_format : typing.Optional[SpeechRecognitionRequestOutputFormat] + + google_translate_target : typing.Optional[str] + use `translation_model` & `translation_target` instead. + + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[core.File] + See core.File for more documentation + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - FunctionsPageStatusResponse + AsrPageOutput Successful Response Examples @@ -721,39 +2453,131 @@ def post_v3functions_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3functions_async() + client.speech_recognition() """ _response = self._client_wrapper.httpx_client.request( - "v3/functions/async", + "v3/asr/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "selected_model": selected_model, + "language": language, + "translation_model": translation_model, + "output_format": output_format, + "google_translate_target": google_translate_target, + "translation_source": translation_source, + "translation_target": translation_target, + "settings": settings, + }, + files={ + "documents": documents, + "glossary_document": glossary_document, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - FunctionsPageStatusResponse, + AsrPageOutput, parse_obj_as( - type_=FunctionsPageStatusResponse, # type: ignore + type_=AsrPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3lipsync_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncPageStatusResponse: + def text_to_music( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + negative_prompt: 
typing.Optional[str] = OMIT, + duration_sec: typing.Optional[float] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> Text2AudioPageOutput: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + duration_sec : typing.Optional[float] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - LipsyncPageStatusResponse + Text2AudioPageOutput Successful Response Examples @@ -763,39 +2587,120 @@ def post_v3lipsync_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3lipsync_async() + client.text_to_music( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/Lipsync/async", + "v3/text2audio/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "duration_sec": duration_sec, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - LipsyncPageStatusResponse, + Text2AudioPageOutput, parse_obj_as( - type_=LipsyncPageStatusResponse, # type: ignore + type_=Text2AudioPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3lipsync_tts_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncTtsPageStatusResponse: + def translate( + self, + *, + example_id: typing.Optional[str] = None, + functions: 
typing.Optional[typing.List[TranslateRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + texts: typing.Optional[typing.List[str]] = None, + selected_model: typing.Optional[TranslateRequestSelectedModel] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> TranslationPageOutput: """ Parameters ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[TranslateRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + texts : typing.Optional[typing.List[str]] + + selected_model : typing.Optional[TranslateRequestSelectedModel] + + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[core.File] + See core.File for more documentation + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - LipsyncTtsPageStatusResponse + TranslationPageOutput Successful Response Examples @@ -805,39 +2710,143 @@ def post_v3lipsync_tts_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3lipsync_tts_async() + client.translate() """ _response = self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async", + "v3/translate/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "translation_source": translation_source, + "translation_target": translation_target, + "settings": settings, + }, + files={ + "glossary_document": glossary_document, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - LipsyncTtsPageStatusResponse, + TranslationPageOutput, parse_obj_as( - type_=LipsyncTtsPageStatusResponse, # type: ignore + type_=TranslationPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3text_to_speech_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> TextToSpeechPageStatusResponse: + def remix_image( + self, + *, + input_image: core.File, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[RemixImageRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = 
None, + text_prompt: typing.Optional[str] = None, + selected_model: typing.Optional[RemixImageRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + prompt_strength: typing.Optional[float] = None, + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, + seed: typing.Optional[int] = None, + image_guidance_scale: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> Img2ImgPageOutput: """ Parameters ---------- + input_image : core.File + See core.File for more documentation + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[RemixImageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + text_prompt : typing.Optional[str] + + selected_model : typing.Optional[RemixImageRequestSelectedModel] + + selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + prompt_strength : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.List[float]] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
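A hedged sketch (not part of the diff) of the new multipart remix_image() method; it assumes an open binary file handle is an acceptable core.File value and uses a placeholder local path:

from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)
with open("photo.png", "rb") as f:  # placeholder input image
    out = client.remix_image(
        input_image=f,  # uploaded as multipart form data rather than JSON
        text_prompt="a watercolor painting of the same scene",
        num_outputs=1,
    )
print(out)  # Img2ImgPageOutput on success
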
Returns ------- - TextToSpeechPageStatusResponse + Img2ImgPageOutput Successful Response Examples @@ -847,37 +2856,154 @@ def post_v3text_to_speech_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3text_to_speech_async() + client.remix_image() """ _response = self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async", + "v3/Img2Img/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "seed": seed, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + files={ + "input_image": input_image, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - TextToSpeechPageStatusResponse, + Img2ImgPageOutput, parse_obj_as( - type_=TextToSpeechPageStatusResponse, # type: ignore + type_=Img2ImgPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> AsrPageStatusResponse: + def text_to_image( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + dall_e3quality: typing.Optional[str] = OMIT, + dall_e3style: typing.Optional[str] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, + edit_instruction: typing.Optional[str] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareText2ImgPageOutput: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : 
typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + dall_e3quality : typing.Optional[str] + + dall_e3style : typing.Optional[str] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] + + scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] + + edit_instruction : typing.Optional[str] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - AsrPageStatusResponse + CompareText2ImgPageOutput Successful Response Examples @@ -887,81 +3013,156 @@ def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3asr_async() + client.text_to_image( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/asr/async", + "v3/CompareText2Img/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "output_width": output_width, + "output_height": output_height, + "num_outputs": num_outputs, + "quality": quality, + "dall_e_3_quality": dall_e3quality, + "dall_e_3_style": dall_e3style, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "scheduler": scheduler, + "edit_instruction": edit_instruction, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - AsrPageStatusResponse, + CompareText2ImgPageOutput, parse_obj_as( - type_=AsrPageStatusResponse, # type: ignore + type_=CompareText2ImgPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3text2audio_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> Text2AudioPageStatusResponse: + def product_image( + self, + *, + input_image: core.File, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: 
typing.Optional[typing.List[ProductImageRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + mask_threshold: typing.Optional[float] = None, + selected_model: typing.Optional[ProductImageRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + sd2upscaling: typing.Optional[bool] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ObjectInpaintingPageOutput: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + input_image : core.File + See core.File for more documentation - Returns - ------- - Text2AudioPageStatusResponse - Successful Response + text_prompt : str - Examples - -------- - from gooey import Gooey + example_id : typing.Optional[str] - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3text2audio_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/text2audio/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - Text2AudioPageStatusResponse, - parse_obj_as( - type_=Text2AudioPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + functions : typing.Optional[typing.List[ProductImageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + mask_threshold : typing.Optional[float] + + selected_model : typing.Optional[ProductImageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] - def post_v3translate_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> TranslationPageStatusResponse: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
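A short, hedged sketch (not part of the diff) for product_image(), which requires both an image upload and a text_prompt; the file name, prompt, and obj_scale below are placeholder values:

from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)
with open("bottle.png", "rb") as f:  # placeholder product photo
    out = client.product_image(
        input_image=f,
        text_prompt="product shot on a marble countertop, soft studio lighting",
        obj_scale=0.5,  # illustrative only
    )
print(out)  # ObjectInpaintingPageOutput on success
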
Returns ------- - TranslationPageStatusResponse + ObjectInpaintingPageOutput Successful Response Examples @@ -971,39 +3172,155 @@ def post_v3translate_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3translate_async() + client.product_image( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/translate/async", + "v3/ObjectInpainting/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "mask_threshold": mask_threshold, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "settings": settings, + }, + files={ + "input_image": input_image, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - TranslationPageStatusResponse, + ObjectInpaintingPageOutput, parse_obj_as( - type_=TranslationPageStatusResponse, # type: ignore + type_=ObjectInpaintingPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3img2img_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> Img2ImgPageStatusResponse: + def portrait( + self, + *, + input_image: core.File, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[PortraitRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + face_scale: typing.Optional[float] = None, + face_pos_x: typing.Optional[float] = None, + face_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[PortraitRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + upscale_factor: typing.Optional[float] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> FaceInpaintingPageOutput: """ Parameters ---------- + input_image : core.File + See core.File for more documentation + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[PortraitRequestFunctionsItem]] + + variables : 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + face_scale : typing.Optional[float] + + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : typing.Optional[PortraitRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - Img2ImgPageStatusResponse + FaceInpaintingPageOutput Successful Response Examples @@ -1013,39 +3330,180 @@ def post_v3img2img_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3img2img_async() + client.portrait( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/Img2Img/async", + "v3/FaceInpainting/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "seed": seed, + "settings": settings, + }, + files={ + "input_image": input_image, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - Img2ImgPageStatusResponse, + FaceInpaintingPageOutput, parse_obj_as( - type_=Img2ImgPageStatusResponse, # type: ignore + type_=FaceInpaintingPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3compare_text2img_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> CompareText2ImgPageStatusResponse: + def image_from_email( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + email_address: typing.Optional[str] = OMIT, + twitter_handle: typing.Optional[str] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, 
+ selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + should_send_email: typing.Optional[bool] = OMIT, + email_from: typing.Optional[str] = OMIT, + email_cc: typing.Optional[str] = OMIT, + email_bcc: typing.Optional[str] = OMIT, + email_subject: typing.Optional[str] = OMIT, + email_body: typing.Optional[str] = OMIT, + email_body_enable_html: typing.Optional[bool] = OMIT, + fallback_email_body: typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EmailFaceInpaintingPageOutput: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + email_address : typing.Optional[str] + + twitter_handle : typing.Optional[str] + + face_scale : typing.Optional[float] + + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + should_send_email : typing.Optional[bool] + + email_from : typing.Optional[str] + + email_cc : typing.Optional[str] + + email_bcc : typing.Optional[str] + + email_subject : typing.Optional[str] + + email_body : typing.Optional[str] + + email_body_enable_html : typing.Optional[bool] + + fallback_email_body : typing.Optional[str] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
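A hedged sketch (not part of the diff) of the typed error handling that image_from_email() and the other wrappers now perform; the gooey.errors import path is assumed from this release's errors/ module layout and may need adjusting:

from gooey import Gooey
from gooey.errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError

client = Gooey(
    api_key="YOUR_API_KEY",
)
try:
    out = client.image_from_email(
        email_address="sean@dara.network",
        text_prompt="winter's day in paris",
    )
    print(out)  # EmailFaceInpaintingPageOutput
except PaymentRequiredError:
    print("Out of credits (HTTP 402)")
except UnprocessableEntityError as exc:
    print("Validation failed (HTTP 422):", exc)
except TooManyRequestsError:
    print("Rate limited (HTTP 429); retry later")
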
Returns ------- - CompareText2ImgPageStatusResponse + EmailFaceInpaintingPageOutput Successful Response Examples @@ -1055,39 +3513,159 @@ def post_v3compare_text2img_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3compare_text2img_async() + client.image_from_email( + email_address="sean@dara.network", + text_prompt="winter's day in paris", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async", + "v3/EmailFaceInpainting/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "twitter_handle": twitter_handle, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "should_send_email": should_send_email, + "email_from": email_from, + "email_cc": email_cc, + "email_bcc": email_bcc, + "email_subject": email_subject, + "email_body": email_body, + "email_body_enable_html": email_body_enable_html, + "fallback_email_body": fallback_email_body, + "seed": seed, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - CompareText2ImgPageStatusResponse, + EmailFaceInpaintingPageOutput, parse_obj_as( - type_=CompareText2ImgPageStatusResponse, # type: ignore + type_=EmailFaceInpaintingPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3object_inpainting_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> ObjectInpaintingPageStatusResponse: + def image_from_web_search( + self, + *, + search_query: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + 
image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GoogleImageGenPageOutput: """ Parameters ---------- + search_query : str + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + prompt_strength : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - ObjectInpaintingPageStatusResponse + GoogleImageGenPageOutput Successful Response Examples @@ -1097,39 +3675,134 @@ def post_v3object_inpainting_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3object_inpainting_async() + client.image_from_web_search( + search_query="search_query", + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async", + "v3/GoogleImageGen/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "search_query": search_query, + "text_prompt": text_prompt, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ObjectInpaintingPageStatusResponse, + GoogleImageGenPageOutput, parse_obj_as( - type_=ObjectInpaintingPageStatusResponse, # type: ignore + type_=GoogleImageGenPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3face_inpainting_async( - self, *, 
request_options: typing.Optional[RequestOptions] = None - ) -> FaceInpaintingPageStatusResponse: + def remove_background( + self, + *, + input_image: core.File, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None, + mask_threshold: typing.Optional[float] = None, + rect_persepective_transform: typing.Optional[bool] = None, + reflection_opacity: typing.Optional[float] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ImageSegmentationPageOutput: """ Parameters ---------- + input_image : core.File + See core.File for more documentation + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel] + + mask_threshold : typing.Optional[float] + + rect_persepective_transform : typing.Optional[bool] + + reflection_opacity : typing.Optional[float] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - FaceInpaintingPageStatusResponse + ImageSegmentationPageOutput Successful Response Examples @@ -1139,39 +3812,121 @@ def post_v3face_inpainting_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3face_inpainting_async() + client.remove_background() """ _response = self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async", + "v3/ImageSegmentation/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "selected_model": selected_model, + "mask_threshold": mask_threshold, + "rect_persepective_transform": rect_persepective_transform, + "reflection_opacity": reflection_opacity, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "settings": settings, + }, + files={ + "input_image": input_image, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - FaceInpaintingPageStatusResponse, + ImageSegmentationPageOutput, parse_obj_as( - type_=FaceInpaintingPageStatusResponse, # type: ignore + type_=ImageSegmentationPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) 
_response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3email_face_inpainting_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> EmailFaceInpaintingPageStatusResponse: + def upscale( + self, + *, + scale: int, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[UpscaleRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_image: typing.Optional[core.File] = None, + input_video: typing.Optional[core.File] = None, + selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None, + selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareUpscalerPageOutput: """ Parameters ---------- + scale : int + The final upsampling scale of the image + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[UpscaleRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_image : typing.Optional[core.File] + See core.File for more documentation + + input_video : typing.Optional[core.File] + See core.File for more documentation + + selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] + + selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - EmailFaceInpaintingPageStatusResponse + CompareUpscalerPageOutput Successful Response Examples @@ -1181,39 +3936,108 @@ def post_v3email_face_inpainting_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3email_face_inpainting_async() + client.upscale( + scale=1, + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async", + "v3/compare-ai-upscalers/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "scale": scale, + "selected_models": selected_models, + "selected_bg_model": selected_bg_model, + "settings": settings, + }, + files={ + "input_image": input_image, + "input_video": input_video, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - EmailFaceInpaintingPageStatusResponse, + CompareUpscalerPageOutput, parse_obj_as( - type_=EmailFaceInpaintingPageStatusResponse, # type: ignore + type_=CompareUpscalerPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3google_image_gen_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleImageGenPageStatusResponse: + def embed( + self, + *, + texts: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EmbeddingsPageOutput: """ Parameters ---------- + texts : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - GoogleImageGenPageStatusResponse + EmbeddingsPageOutput Successful Response Examples @@ -1223,39 +4047,172 @@ def post_v3google_image_gen_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3google_image_gen_async() + client.embed( + texts=["texts"], + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async", + "v3/embeddings/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GoogleImageGenPageStatusResponse, + EmbeddingsPageOutput, parse_obj_as( - type_=GoogleImageGenPageStatusResponse, # type: ignore + type_=EmbeddingsPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3image_segmentation_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> ImageSegmentationPageStatusResponse: + def seo_people_also_ask_doc( + self, + *, + search_query: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> RelatedQnADocPageOutput: """ Parameters ---------- + search_query : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] + + documents : typing.Optional[typing.Sequence[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] + + citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - ImageSegmentationPageStatusResponse + RelatedQnADocPageOutput Successful Response Examples @@ -1265,30 +4222,94 @@ def post_v3image_segmentation_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3image_segmentation_async() + client.seo_people_also_ask_doc( + search_query="search_query", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async", + "v3/related-qna-maker-doc/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ImageSegmentationPageStatusResponse, + RelatedQnADocPageOutput, parse_obj_as( - type_=ImageSegmentationPageStatusResponse, # type: ignore + type_=RelatedQnADocPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3compare_ai_upscalers_async( + def health_status_get( self, *, request_options: typing.Optional[RequestOptions] = None - ) -> CompareUpscalerPageStatusResponse: + ) -> typing.Optional[typing.Any]: """ Parameters ---------- @@ -1297,7 +4318,7 @@ def post_v3compare_ai_upscalers_async( Returns ------- - CompareUpscalerPageStatusResponse + typing.Optional[typing.Any] Successful Response Examples @@ -1307,229 +4328,19 @@ def post_v3compare_ai_upscalers_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3compare_ai_upscalers_async() + client.health_status_get() """ _response = self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async", - method="POST", + "status", + method="GET", request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - CompareUpscalerPageStatusResponse, + typing.Optional[typing.Any], parse_obj_as( - 
type_=CompareUpscalerPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3chyron_plant_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> ChyronPlantPageStatusResponse: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ChyronPlantPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3chyron_plant_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ChyronPlantPageStatusResponse, - parse_obj_as( - type_=ChyronPlantPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3letter_writer_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> LetterWriterPageStatusResponse: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LetterWriterPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3letter_writer_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/LetterWriter/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LetterWriterPageStatusResponse, - parse_obj_as( - type_=LetterWriterPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3embeddings_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> EmbeddingsPageStatusResponse: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EmbeddingsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3embeddings_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/embeddings/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EmbeddingsPageStatusResponse, - parse_obj_as( - type_=EmbeddingsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def post_v3related_qna_maker_doc_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnADocPageStatusResponse: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - RelatedQnADocPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3related_qna_maker_doc_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - RelatedQnADocPageStatusResponse, - parse_obj_as( - type_=RelatedQnADocPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def health_status_get( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[typing.Any] - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.health_status_get() - """ - _response = self._client_wrapper.httpx_client.request( - "status", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=typing.Optional[typing.Any], # type: ignore object_=_response.json(), ), ) @@ -1600,227 +4411,83 @@ def __init__( timeout=_defaulted_timeout, ) self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper) + self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) + self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper) + self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper) + self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper) + self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper) self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper) - async def post_v3video_bots_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> VideoBotsPageStatusResponse: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - VideoBotsPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.post_v3video_bots_async() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/video-bots/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - VideoBotsPageStatusResponse, - parse_obj_as( - type_=VideoBotsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def post_v3deforum_sd_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> DeforumSdPageStatusResponse: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DeforumSdPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.post_v3deforum_sd_async() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DeforumSdPageStatusResponse, - parse_obj_as( - type_=DeforumSdPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def post_v3art_qr_code_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> QrCodeGeneratorPageStatusResponse: + async def animate( + self, + *, + animation_prompts: typing.Sequence[DeforumSdPageRequestAnimationPromptsItem], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[DeforumSdPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_frames: typing.Optional[int] = OMIT, + selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, + animation_mode: typing.Optional[str] = OMIT, + zoom: typing.Optional[str] = OMIT, + translation_x: typing.Optional[str] = OMIT, + translation_y: typing.Optional[str] = OMIT, + rotation3d_x: typing.Optional[str] = OMIT, + rotation3d_y: typing.Optional[str] = OMIT, + rotation3d_z: typing.Optional[str] = OMIT, + fps: typing.Optional[int] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DeforumSdPageOutput: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ animation_prompts : typing.Sequence[DeforumSdPageRequestAnimationPromptsItem] - Returns - ------- - QrCodeGeneratorPageStatusResponse - Successful Response + example_id : typing.Optional[str] - Examples - -------- - import asyncio + functions : typing.Optional[typing.Sequence[DeforumSdPageRequestFunctionsItem]] - from gooey import AsyncGooey + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + max_frames : typing.Optional[int] + selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] - async def main() -> None: - await client.post_v3art_qr_code_async() + animation_mode : typing.Optional[str] + zoom : typing.Optional[str] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - QrCodeGeneratorPageStatusResponse, - parse_obj_as( - type_=QrCodeGeneratorPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + translation_x : typing.Optional[str] - async def post_v3related_qna_maker_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnAPageStatusResponse: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - RelatedQnAPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + translation_y : typing.Optional[str] + rotation3d_x : typing.Optional[str] - async def main() -> None: - await client.post_v3related_qna_maker_async() + rotation3d_y : typing.Optional[str] + rotation3d_z : typing.Optional[str] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - RelatedQnAPageStatusResponse, - parse_obj_as( - type_=RelatedQnAPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + fps : typing.Optional[int] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] - async def post_v3seo_summary_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> SeoSummaryPageStatusResponse: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - SeoSummaryPageStatusResponse + DeforumSdPageOutput Successful Response Examples -------- import asyncio - from gooey import AsyncGooey + from gooey import AsyncGooey, DeforumSdPageRequestAnimationPromptsItem client = AsyncGooey( api_key="YOUR_API_KEY", @@ -1828,142 +4495,197 @@ async def post_v3seo_summary_async( async def main() -> None: - await client.post_v3seo_summary_async() + await client.animate( + animation_prompts=[ + DeforumSdPageRequestAnimationPromptsItem( + frame="frame", + prompt="prompt", + ) + ], + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async", + "v3/DeforumSD/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "animation_prompts": animation_prompts, + "max_frames": max_frames, + "selected_model": selected_model, + "animation_mode": animation_mode, + "zoom": zoom, + "translation_x": translation_x, + "translation_y": translation_y, + "rotation_3d_x": rotation3d_x, + "rotation_3d_y": rotation3d_y, + "rotation_3d_z": rotation3d_z, + "fps": fps, + "seed": seed, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - SeoSummaryPageStatusResponse, + DeforumSdPageOutput, parse_obj_as( - type_=SeoSummaryPageStatusResponse, # type: ignore + type_=DeforumSdPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3google_gpt_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleGptPageStatusResponse: + async def qr_code( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[QrCodeRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + qr_code_data: typing.Optional[str] = None, + qr_code_input_image: typing.Optional[core.File] = None, + qr_code_vcard: typing.Optional[QrCodeRequestQrCodeVcard] = None, + qr_code_file: typing.Optional[core.File] = None, + use_url_shortener: typing.Optional[bool] = None, + negative_prompt: typing.Optional[str] = None, + image_prompt: typing.Optional[str] = None, + image_prompt_controlnet_models: typing.Optional[ + typing.List[QrCodeRequestImagePromptControlnetModelsItem] + ] = None, + image_prompt_strength: typing.Optional[float] = None, + image_prompt_scale: typing.Optional[float] = None, + image_prompt_pos_x: typing.Optional[float] = None, + image_prompt_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[QrCodeRequestSelectedModel] = None, + selected_controlnet_model: 
typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + scheduler: typing.Optional[QrCodeRequestScheduler] = None, + seed: typing.Optional[int] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> QrCodeGeneratorPageOutput: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + text_prompt : str - Returns - ------- - GoogleGptPageStatusResponse - Successful Response + example_id : typing.Optional[str] - Examples - -------- - import asyncio + functions : typing.Optional[typing.List[QrCodeRequestFunctionsItem]] - from gooey import AsyncGooey + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + qr_code_data : typing.Optional[str] + qr_code_input_image : typing.Optional[core.File] + See core.File for more documentation - async def main() -> None: - await client.post_v3google_gpt_async() + qr_code_vcard : typing.Optional[QrCodeRequestQrCodeVcard] + qr_code_file : typing.Optional[core.File] + See core.File for more documentation - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/google-gpt/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - GoogleGptPageStatusResponse, - parse_obj_as( - type_=GoogleGptPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + use_url_shortener : typing.Optional[bool] - async def post_v3social_lookup_email_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> SocialLookupEmailPageStatusResponse: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ negative_prompt : typing.Optional[str] - Returns - ------- - SocialLookupEmailPageStatusResponse - Successful Response + image_prompt : typing.Optional[str] - Examples - -------- - import asyncio + image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]] - from gooey import AsyncGooey + image_prompt_strength : typing.Optional[float] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + image_prompt_scale : typing.Optional[float] + image_prompt_pos_x : typing.Optional[float] - async def main() -> None: - await client.post_v3social_lookup_email_async() + image_prompt_pos_y : typing.Optional[float] + selected_model : typing.Optional[QrCodeRequestSelectedModel] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - SocialLookupEmailPageStatusResponse, - parse_obj_as( - type_=SocialLookupEmailPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.List[float]] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + scheduler : typing.Optional[QrCodeRequestScheduler] + + seed : typing.Optional[int] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + settings : typing.Optional[RunSettings] - async def post_v3bulk_runner_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> BulkRunnerPageStatusResponse: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - BulkRunnerPageStatusResponse + QrCodeGeneratorPageOutput Successful Response Examples @@ -1978,42 +4700,195 @@ async def post_v3bulk_runner_async( async def main() -> None: - await client.post_v3bulk_runner_async() + await client.qr_code( + text_prompt="text_prompt", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async", + "v3/art-qr-code/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "qr_code_data": qr_code_data, + "qr_code_vcard": qr_code_vcard, + "use_url_shortener": use_url_shortener, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "image_prompt": image_prompt, + "image_prompt_controlnet_models": image_prompt_controlnet_models, + "image_prompt_strength": image_prompt_strength, + "image_prompt_scale": image_prompt_scale, + "image_prompt_pos_x": image_prompt_pos_x, + "image_prompt_pos_y": image_prompt_pos_y, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "num_outputs": num_outputs, + "quality": quality, + "scheduler": scheduler, + "seed": seed, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "settings": settings, + }, + files={ + "qr_code_input_image": qr_code_input_image, + "qr_code_file": qr_code_file, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - BulkRunnerPageStatusResponse, + QrCodeGeneratorPageOutput, parse_obj_as( - type_=BulkRunnerPageStatusResponse, # type: ignore + type_=QrCodeGeneratorPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3bulk_eval_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> BulkEvalPageStatusResponse: + async def seo_people_also_ask( + self, + *, + search_query: str, + site_filter: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RelatedQnAPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: 
typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> RelatedQnAPageOutput: """ Parameters ---------- + search_query : str + + site_filter : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RelatedQnAPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - BulkEvalPageStatusResponse + RelatedQnAPageOutput Successful Response Examples @@ -2028,42 +4903,175 @@ async def post_v3bulk_eval_async( async def main() -> None: - await client.post_v3bulk_eval_async() + await client.seo_people_also_ask( + search_query="search_query", + site_filter="site_filter", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async", + "v3/related-qna-maker/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - BulkEvalPageStatusResponse, + RelatedQnAPageOutput, parse_obj_as( - type_=BulkEvalPageStatusResponse, # type: ignore + type_=RelatedQnAPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3doc_extract_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> DocExtractPageStatusResponse: + async def seo_content( + self, + *, + search_query: str, + keywords: str, + title: str, + company_url: str, + example_id: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = OMIT, + enable_html: typing.Optional[bool] = OMIT, + selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + enable_crosslinks: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + 
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> SeoSummaryPageOutput: """ Parameters ---------- + search_query : str + + keywords : str + + title : str + + company_url : str + + example_id : typing.Optional[str] + + task_instructions : typing.Optional[str] + + enable_html : typing.Optional[bool] + + selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + enable_crosslinks : typing.Optional[bool] + + seed : typing.Optional[int] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - DocExtractPageStatusResponse + SeoSummaryPageOutput Successful Response Examples @@ -2078,42 +5086,188 @@ async def post_v3doc_extract_async( async def main() -> None: - await client.post_v3doc_extract_async() + await client.seo_content( + search_query="search_query", + keywords="keywords", + title="title", + company_url="company_url", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-extract/async", + "v3/SEOSummary/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "search_query": search_query, + "keywords": keywords, + "title": title, + "company_url": company_url, + "task_instructions": task_instructions, + "enable_html": enable_html, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "enable_crosslinks": enable_crosslinks, + "seed": seed, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - DocExtractPageStatusResponse, + SeoSummaryPageOutput, parse_obj_as( - type_=DocExtractPageStatusResponse, # type: ignore + type_=SeoSummaryPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if 
_response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3compare_llm_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> CompareLlmPageStatusResponse: + async def web_search_llm( + self, + *, + search_query: str, + site_filter: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[GoogleGptPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GoogleGptPageOutput: """ Parameters ---------- + search_query : str + + site_filter : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[GoogleGptPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - CompareLlmPageStatusResponse + GoogleGptPageOutput Successful Response Examples @@ -2128,92 +5282,147 @@ async def post_v3compare_llm_async( async def main() -> None: - await client.post_v3compare_llm_async() + await client.web_search_llm( + search_query="search_query", + site_filter="site_filter", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async", + "v3/google-gpt/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - CompareLlmPageStatusResponse, + GoogleGptPageOutput, parse_obj_as( - type_=CompareLlmPageStatusResponse, # type: ignore + type_=GoogleGptPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3doc_search_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> DocSearchPageStatusResponse: + async def personalize_email( + self, + *, + email_address: str, + example_id: typing.Optional[str] = None, + 
functions: typing.Optional[typing.Sequence[SocialLookupEmailPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> SocialLookupEmailPageOutput: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + email_address : str - Returns - ------- - DocSearchPageStatusResponse - Successful Response + example_id : typing.Optional[str] - Examples - -------- - import asyncio + functions : typing.Optional[typing.Sequence[SocialLookupEmailPageRequestFunctionsItem]] - from gooey import AsyncGooey + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + input_prompt : typing.Optional[str] + selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] - async def main() -> None: - await client.post_v3doc_search_async() + avoid_repetition : typing.Optional[bool] + num_outputs : typing.Optional[int] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-search/async", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DocSearchPageStatusResponse, - parse_obj_as( - type_=DocSearchPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] - async def post_v3smart_gpt_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> SmartGptPageStatusResponse: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
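Every run method in this hunk now maps 402, 422 and 429 responses to PaymentRequiredError, UnprocessableEntityError and TooManyRequestsError instead of a bare ApiError. A minimal sketch of catching them around this call; the import paths below are assumptions, since the hunk only shows the classes being raised, not where they are exported from:

    import asyncio

    from gooey import AsyncGooey
    # Assumed import locations for the typed errors raised in this hunk.
    from gooey.errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError
    from gooey.core.api_error import ApiError

    client = AsyncGooey(api_key="YOUR_API_KEY")


    async def main() -> None:
        try:
            output = await client.personalize_email(email_address="name@example.com")
            print(output)
        except TooManyRequestsError:
            # 429: rate limited, back off and retry later
            ...
        except (PaymentRequiredError, UnprocessableEntityError) as err:
            # 402 / 422: inspect the parsed error body attached to the exception
            print(err)
        except ApiError as err:
            # any other non-2xx response still surfaces as a generic ApiError
            print(err.status_code, err.body)


    asyncio.run(main())
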
Returns ------- - SmartGptPageStatusResponse + SocialLookupEmailPageOutput Successful Response Examples @@ -2228,42 +5437,136 @@ async def post_v3smart_gpt_async( async def main() -> None: - await client.post_v3smart_gpt_async() + await client.personalize_email( + email_address="email_address", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async", + "v3/SocialLookupEmail/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "input_prompt": input_prompt, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - SmartGptPageStatusResponse, + SocialLookupEmailPageOutput, parse_obj_as( - type_=SmartGptPageStatusResponse, # type: ignore + type_=SocialLookupEmailPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3doc_summary_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> DocSummaryPageStatusResponse: + async def bulk_run( + self, + *, + documents: typing.List[core.File], + run_urls: typing.List[str], + input_columns: typing.Dict[str, str], + output_columns: typing.Dict[str, str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[BulkRunRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + eval_urls: typing.Optional[typing.List[str]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> BulkRunnerPageOutput: """ Parameters ---------- + documents : typing.List[core.File] + See core.File for more documentation + + run_urls : typing.List[str] + + Provide one or more Gooey.AI workflow runs. + You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. + + + input_columns : typing.Dict[str, str] + + For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + + + output_columns : typing.Dict[str, str] + + For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. 
+ + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[BulkRunRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + eval_urls : typing.Optional[typing.List[str]] + + _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - DocSummaryPageStatusResponse + BulkRunnerPageOutput Successful Response Examples @@ -2278,42 +5581,149 @@ async def post_v3doc_summary_async( async def main() -> None: - await client.post_v3doc_summary_async() + await client.bulk_run( + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-summary/async", + "v3/bulk-runner/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "run_urls": run_urls, + "input_columns": input_columns, + "output_columns": output_columns, + "eval_urls": eval_urls, + "settings": settings, + }, + files={ + "documents": documents, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - DocSummaryPageStatusResponse, + BulkRunnerPageOutput, parse_obj_as( - type_=DocSummaryPageStatusResponse, # type: ignore + type_=BulkRunnerPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3functions_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> FunctionsPageStatusResponse: + async def synthesize_data( + self, + *, + documents: typing.List[core.File], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + sheet_url: typing.Optional[core.File] = None, + selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + task_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[SynthesizeDataRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + 
response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocExtractPageOutput: """ Parameters ---------- + documents : typing.List[core.File] + See core.File for more documentation + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + sheet_url : typing.Optional[core.File] + See core.File for more documentation + + selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + glossary_document : typing.Optional[core.File] + See core.File for more documentation + + task_instructions : typing.Optional[str] + + selected_model : typing.Optional[SynthesizeDataRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - FunctionsPageStatusResponse + DocExtractPageOutput Successful Response Examples @@ -2328,42 +5738,135 @@ async def post_v3functions_async( async def main() -> None: - await client.post_v3functions_async() + await client.synthesize_data() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/functions/async", + "v3/doc-extract/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "task_instructions": task_instructions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={ + "documents": documents, + "sheet_url": sheet_url, + "glossary_document": glossary_document, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - FunctionsPageStatusResponse, + DocExtractPageOutput, parse_obj_as( - type_=FunctionsPageStatusResponse, # type: ignore + type_=DocExtractPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, 
body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3lipsync_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncPageStatusResponse: + async def llm( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[CompareLlmPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareLlmPageOutput: """ Parameters ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[CompareLlmPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
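`variables` is rendered into Jinja templates in the prompt (see the parameter note above). A small sketch of that pairing; the model identifier is a placeholder, since the allowed CompareLlmPageRequestSelectedModelsItem values are not listed in this hunk:

    import asyncio

    from gooey import AsyncGooey

    client = AsyncGooey(api_key="YOUR_API_KEY")


    async def main() -> None:
        output = await client.llm(
            # Jinja template; {{ article }} is filled in from `variables`
            input_prompt="Summarize {{ article }} in one sentence.",
            variables={"article": "Full article text goes here."},
            selected_models=["gpt_4_o"],  # placeholder enum value
            max_tokens=256,
        )
        print(output)


    asyncio.run(main())
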
Returns ------- - LipsyncPageStatusResponse + CompareLlmPageOutput Successful Response Examples @@ -2378,42 +5881,165 @@ async def post_v3lipsync_async( async def main() -> None: - await client.post_v3lipsync_async() + await client.llm() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/Lipsync/async", + "v3/CompareLLM/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "selected_models": selected_models, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - LipsyncPageStatusResponse, + CompareLlmPageOutput, parse_obj_as( - type_=LipsyncPageStatusResponse, # type: ignore + type_=CompareLlmPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3lipsync_tts_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncTtsPageStatusResponse: + async def rag( + self, + *, + search_query: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[DocSearchPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocSearchPageOutput: """ Parameters ---------- + 
search_query : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[DocSearchPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] + + documents : typing.Optional[typing.Sequence[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSearchPageRequestSelectedModel] + + citation_style : typing.Optional[DocSearchPageRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - LipsyncTtsPageStatusResponse + DocSearchPageOutput Successful Response Examples @@ -2428,42 +6054,157 @@ async def post_v3lipsync_tts_async( async def main() -> None: - await client.post_v3lipsync_tts_async() + await client.rag( + search_query="search_query", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async", + "v3/doc-search/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - LipsyncTtsPageStatusResponse, + DocSearchPageOutput, parse_obj_as( - type_=LipsyncTtsPageStatusResponse, # type: ignore + type_=DocSearchPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + 
object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3text_to_speech_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> TextToSpeechPageStatusResponse: + async def doc_summary( + self, + *, + documents: typing.List[core.File], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + task_instructions: typing.Optional[str] = None, + merge_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[DocSummaryRequestSelectedModel] = None, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, + selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocSummaryPageOutput: """ Parameters ---------- + documents : typing.List[core.File] + See core.File for more documentation + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + merge_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSummaryRequestSelectedModel] + + chain_type : typing.Optional[typing.Literal["map_reduce"]] + + selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
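`documents` is uploaded as multipart form data (note the `files={...}` mapping in the request code below). A sketch assuming `core.File` accepts an open binary file object; the file name and instructions are placeholders:

    import asyncio

    from gooey import AsyncGooey

    client = AsyncGooey(api_key="YOUR_API_KEY")


    async def main() -> None:
        # Assumption: core.File accepts a binary file object (or a (name, bytes) pair).
        with open("report.pdf", "rb") as f:
            output = await client.doc_summary(
                documents=[f],
                task_instructions="Summarize each section in two bullet points.",
            )
        print(output)


    asyncio.run(main())
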
Returns ------- - TextToSpeechPageStatusResponse + DocSummaryPageOutput Successful Response Examples @@ -2478,42 +6219,191 @@ async def post_v3text_to_speech_async( async def main() -> None: - await client.post_v3text_to_speech_async() + await client.doc_summary() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async", + "v3/doc-summary/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "task_instructions": task_instructions, + "merge_instructions": merge_instructions, + "selected_model": selected_model, + "chain_type": chain_type, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={ + "documents": documents, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - TextToSpeechPageStatusResponse, + DocSummaryPageOutput, parse_obj_as( - type_=TextToSpeechPageStatusResponse, # type: ignore + type_=DocSummaryPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3asr_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsrPageStatusResponse: + async def lipsync_tts( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None, + 
openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None, + input_face: typing.Optional[core.File] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[LipsyncTtsRequestSadtalkerSettings] = None, + selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> LipsyncTtsPageOutput: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel] + + input_face : typing.Optional[core.File] + See core.File for more documentation + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[LipsyncTtsRequestSadtalkerSettings] + + selected_model : typing.Optional[LipsyncTtsRequestSelectedModel] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
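A sketch combining the required `text_prompt` with an uploaded `input_face`; per the note above, `elevenlabs_voice_id` is preferred over the deprecated `elevenlabs_voice_name`. The provider value, voice id and file path are placeholders:

    import asyncio

    from gooey import AsyncGooey

    client = AsyncGooey(api_key="YOUR_API_KEY")


    async def main() -> None:
        # Assumption: core.File accepts a binary file object.
        with open("face.png", "rb") as face:
            output = await client.lipsync_tts(
                text_prompt="Welcome to the demo.",
                input_face=face,  # sent via the files={...} multipart mapping below
                tts_provider="ELEVEN_LABS",  # placeholder; see LipsyncTtsRequestTtsProvider
                elevenlabs_voice_id="VOICE_ID",  # preferred over the deprecated elevenlabs_voice_name
            )
        print(output)


    asyncio.run(main())
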
Returns ------- - AsrPageStatusResponse + LipsyncTtsPageOutput Successful Response Examples @@ -2528,42 +6418,184 @@ async def post_v3asr_async( async def main() -> None: - await client.post_v3asr_async() + await client.lipsync_tts( + text_prompt="text_prompt", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/asr/async", + "v3/LipsyncTTS/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, + "settings": settings, + }, + files={ + "input_face": input_face, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - AsrPageStatusResponse, + LipsyncTtsPageOutput, parse_obj_as( - type_=AsrPageStatusResponse, # type: ignore + type_=LipsyncTtsPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3text2audio_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> Text2AudioPageStatusResponse: + async def text_to_speech( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] 
= OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> TextToSpeechPageOutput: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
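A sketch that selects the Google voice parameters listed above; the provider value and voice name are placeholders (the TextToSpeechPageRequestTtsProvider values are not shown in this hunk):

    import asyncio

    from gooey import AsyncGooey

    client = AsyncGooey(api_key="YOUR_API_KEY")


    async def main() -> None:
        output = await client.text_to_speech(
            text_prompt="Hello from the text to speech workflow.",
            tts_provider="GOOGLE_TTS",  # placeholder provider value
            google_voice_name="en-US-Neural2-F",  # placeholder voice name
            google_speaking_rate=1.0,
            google_pitch=0.0,
        )
        print(output)


    asyncio.run(main())
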
Returns ------- - Text2AudioPageStatusResponse + TextToSpeechPageOutput Successful Response Examples @@ -2578,42 +6610,147 @@ async def post_v3text2audio_async( async def main() -> None: - await client.post_v3text2audio_async() + await client.text_to_speech( + text_prompt="text_prompt", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/text2audio/async", + "v3/TextToSpeech/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - Text2AudioPageStatusResponse, + TextToSpeechPageOutput, parse_obj_as( - type_=Text2AudioPageStatusResponse, # type: ignore + type_=TextToSpeechPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3translate_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> TranslationPageStatusResponse: + async def speech_recognition( + self, + *, + documents: typing.List[core.File], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None, + language: typing.Optional[str] = None, + translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None, + output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None, + google_translate_target: typing.Optional[str] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, + 
request_options: typing.Optional[RequestOptions] = None, + ) -> AsrPageOutput: """ Parameters ---------- + documents : typing.List[core.File] + See core.File for more documentation + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel] + + language : typing.Optional[str] + + translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel] + + output_format : typing.Optional[SpeechRecognitionRequestOutputFormat] + + google_translate_target : typing.Optional[str] + use `translation_model` & `translation_target` instead. + + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[core.File] + See core.File for more documentation + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - TranslationPageStatusResponse + AsrPageOutput Successful Response Examples @@ -2628,42 +6765,134 @@ async def post_v3translate_async( async def main() -> None: - await client.post_v3translate_async() + await client.speech_recognition() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/translate/async", + "v3/asr/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "selected_model": selected_model, + "language": language, + "translation_model": translation_model, + "output_format": output_format, + "google_translate_target": google_translate_target, + "translation_source": translation_source, + "translation_target": translation_target, + "settings": settings, + }, + files={ + "documents": documents, + "glossary_document": glossary_document, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - TranslationPageStatusResponse, + AsrPageOutput, parse_obj_as( - type_=TranslationPageStatusResponse, # type: ignore + type_=AsrPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3img2img_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> Img2ImgPageStatusResponse: + async def text_to_music( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + duration_sec: typing.Optional[float] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> Text2AudioPageOutput: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + duration_sec : typing.Optional[float] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - Img2ImgPageStatusResponse + Text2AudioPageOutput Successful Response Examples @@ -2678,42 +6907,123 @@ async def post_v3img2img_async( async def main() -> None: - await client.post_v3img2img_async() + await client.text_to_music( + text_prompt="text_prompt", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/Img2Img/async", + "v3/text2audio/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "duration_sec": duration_sec, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - Img2ImgPageStatusResponse, + Text2AudioPageOutput, parse_obj_as( - type_=Img2ImgPageStatusResponse, # type: ignore + type_=Text2AudioPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3compare_text2img_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> CompareText2ImgPageStatusResponse: + async def translate( + self, + *, 
+ example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[TranslateRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + texts: typing.Optional[typing.List[str]] = None, + selected_model: typing.Optional[TranslateRequestSelectedModel] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> TranslationPageOutput: """ Parameters ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[TranslateRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + texts : typing.Optional[typing.List[str]] + + selected_model : typing.Optional[TranslateRequestSelectedModel] + + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[core.File] + See core.File for more documentation + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - CompareText2ImgPageStatusResponse + TranslationPageOutput Successful Response Examples @@ -2728,42 +7038,146 @@ async def post_v3compare_text2img_async( async def main() -> None: - await client.post_v3compare_text2img_async() + await client.translate() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async", + "v3/translate/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "translation_source": translation_source, + "translation_target": translation_target, + "settings": settings, + }, + files={ + "glossary_document": glossary_document, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - CompareText2ImgPageStatusResponse, + TranslationPageOutput, parse_obj_as( - type_=CompareText2ImgPageStatusResponse, # type: ignore + type_=TranslationPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3object_inpainting_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> ObjectInpaintingPageStatusResponse: + async def remix_image( + self, + *, + input_image: core.File, + example_id: typing.Optional[str] = None, + functions: 
typing.Optional[typing.List[RemixImageRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + text_prompt: typing.Optional[str] = None, + selected_model: typing.Optional[RemixImageRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + prompt_strength: typing.Optional[float] = None, + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, + seed: typing.Optional[int] = None, + image_guidance_scale: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> Img2ImgPageOutput: """ Parameters ---------- + input_image : core.File + See core.File for more documentation + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[RemixImageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + text_prompt : typing.Optional[str] + + selected_model : typing.Optional[RemixImageRequestSelectedModel] + + selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + prompt_strength : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.List[float]] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
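# A minimal usage sketch for the new `remix_image` method documented above. The async client
# name `AsyncGooey` and the use of a plain binary file handle for the `core.File` argument are
# illustrative assumptions; only the method and parameter names come from this patch.
import asyncio

from gooey import AsyncGooey  # assumed async counterpart of the Gooey client


async def main() -> None:
    client = AsyncGooey(api_key="YOUR_API_KEY")
    result = await client.remix_image(
        input_image=open("photo.png", "rb"),  # required core.File upload
        text_prompt="watercolor illustration of the same scene",
        num_outputs=1,
    )
    print(result)


asyncio.run(main())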
Returns ------- - ObjectInpaintingPageStatusResponse + Img2ImgPageOutput Successful Response Examples @@ -2778,42 +7192,157 @@ async def post_v3object_inpainting_async( async def main() -> None: - await client.post_v3object_inpainting_async() + await client.remix_image() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async", + "v3/Img2Img/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "seed": seed, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + files={ + "input_image": input_image, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ObjectInpaintingPageStatusResponse, + Img2ImgPageOutput, parse_obj_as( - type_=ObjectInpaintingPageStatusResponse, # type: ignore + type_=Img2ImgPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3face_inpainting_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> FaceInpaintingPageStatusResponse: + async def text_to_image( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + dall_e3quality: typing.Optional[str] = OMIT, + dall_e3style: typing.Optional[str] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, + edit_instruction: typing.Optional[str] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareText2ImgPageOutput: """ 
Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + dall_e3quality : typing.Optional[str] + + dall_e3style : typing.Optional[str] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] + + scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] + + edit_instruction : typing.Optional[str] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - FaceInpaintingPageStatusResponse + CompareText2ImgPageOutput Successful Response Examples @@ -2828,42 +7357,159 @@ async def post_v3face_inpainting_async( async def main() -> None: - await client.post_v3face_inpainting_async() + await client.text_to_image( + text_prompt="text_prompt", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async", + "v3/CompareText2Img/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "output_width": output_width, + "output_height": output_height, + "num_outputs": num_outputs, + "quality": quality, + "dall_e_3_quality": dall_e3quality, + "dall_e_3_style": dall_e3style, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "scheduler": scheduler, + "edit_instruction": edit_instruction, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - FaceInpaintingPageStatusResponse, + CompareText2ImgPageOutput, parse_obj_as( - type_=FaceInpaintingPageStatusResponse, # type: ignore + type_=CompareText2ImgPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3email_face_inpainting_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> EmailFaceInpaintingPageStatusResponse: + async def 
product_image( + self, + *, + input_image: core.File, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[ProductImageRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + mask_threshold: typing.Optional[float] = None, + selected_model: typing.Optional[ProductImageRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + sd2upscaling: typing.Optional[bool] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ObjectInpaintingPageOutput: """ Parameters ---------- + input_image : core.File + See core.File for more documentation + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[ProductImageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + mask_threshold : typing.Optional[float] + + selected_model : typing.Optional[ProductImageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
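# A usage sketch for `product_image`, which posts to v3/ObjectInpainting/async per the hunk
# below. `AsyncGooey` and the binary file handle passed as `input_image` are assumptions for
# illustration; the required arguments (`input_image`, `text_prompt`) match the signature above.
import asyncio

from gooey import AsyncGooey  # assumed async client export


async def main() -> None:
    client = AsyncGooey(api_key="YOUR_API_KEY")
    result = await client.product_image(
        input_image=open("product_shot.png", "rb"),
        text_prompt="on a marble countertop with soft morning light",
        obj_scale=0.5,  # optional placement control from the signature above
    )
    print(result)


asyncio.run(main())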
Returns ------- - EmailFaceInpaintingPageStatusResponse + ObjectInpaintingPageOutput Successful Response Examples @@ -2878,42 +7524,158 @@ async def post_v3email_face_inpainting_async( async def main() -> None: - await client.post_v3email_face_inpainting_async() + await client.product_image( + text_prompt="text_prompt", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async", + "v3/ObjectInpainting/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "mask_threshold": mask_threshold, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "settings": settings, + }, + files={ + "input_image": input_image, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - EmailFaceInpaintingPageStatusResponse, + ObjectInpaintingPageOutput, parse_obj_as( - type_=EmailFaceInpaintingPageStatusResponse, # type: ignore + type_=ObjectInpaintingPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3google_image_gen_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleImageGenPageStatusResponse: + async def portrait( + self, + *, + input_image: core.File, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[PortraitRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + face_scale: typing.Optional[float] = None, + face_pos_x: typing.Optional[float] = None, + face_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[PortraitRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + upscale_factor: typing.Optional[float] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> FaceInpaintingPageOutput: """ Parameters ---------- + input_image : core.File + See core.File for more documentation + + text_prompt : str + + example_id : 
typing.Optional[str] + + functions : typing.Optional[typing.List[PortraitRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + face_scale : typing.Optional[float] + + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : typing.Optional[PortraitRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - GoogleImageGenPageStatusResponse + FaceInpaintingPageOutput Successful Response Examples @@ -2928,42 +7690,183 @@ async def post_v3google_image_gen_async( async def main() -> None: - await client.post_v3google_image_gen_async() + await client.portrait( + text_prompt="text_prompt", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async", + "v3/FaceInpainting/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "seed": seed, + "settings": settings, + }, + files={ + "input_image": input_image, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GoogleImageGenPageStatusResponse, + FaceInpaintingPageOutput, parse_obj_as( - type_=GoogleImageGenPageStatusResponse, # type: ignore + type_=FaceInpaintingPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3image_segmentation_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> ImageSegmentationPageStatusResponse: + async def image_from_email( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + email_address: 
typing.Optional[str] = OMIT, + twitter_handle: typing.Optional[str] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + should_send_email: typing.Optional[bool] = OMIT, + email_from: typing.Optional[str] = OMIT, + email_cc: typing.Optional[str] = OMIT, + email_bcc: typing.Optional[str] = OMIT, + email_subject: typing.Optional[str] = OMIT, + email_body: typing.Optional[str] = OMIT, + email_body_enable_html: typing.Optional[bool] = OMIT, + fallback_email_body: typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EmailFaceInpaintingPageOutput: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + email_address : typing.Optional[str] + + twitter_handle : typing.Optional[str] + + face_scale : typing.Optional[float] + + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + should_send_email : typing.Optional[bool] + + email_from : typing.Optional[str] + + email_cc : typing.Optional[str] + + email_bcc : typing.Optional[str] + + email_subject : typing.Optional[str] + + email_body : typing.Optional[str] + + email_body_enable_html : typing.Optional[bool] + + fallback_email_body : typing.Optional[str] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
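# A usage sketch for `image_from_email`, mirroring the example further down in this hunk; the
# async client name `AsyncGooey` is an assumption. `text_prompt` is the only required argument.
import asyncio

from gooey import AsyncGooey  # assumed async client export


async def main() -> None:
    client = AsyncGooey(api_key="YOUR_API_KEY")
    result = await client.image_from_email(
        email_address="sean@dara.network",
        text_prompt="winter's day in paris",
        should_send_email=True,  # presumably emails the result to the address above
    )
    print(result)


asyncio.run(main())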
Returns ------- - ImageSegmentationPageStatusResponse + EmailFaceInpaintingPageOutput Successful Response Examples @@ -2978,42 +7881,162 @@ async def post_v3image_segmentation_async( async def main() -> None: - await client.post_v3image_segmentation_async() + await client.image_from_email( + email_address="sean@dara.network", + text_prompt="winter's day in paris", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async", + "v3/EmailFaceInpainting/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "twitter_handle": twitter_handle, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "should_send_email": should_send_email, + "email_from": email_from, + "email_cc": email_cc, + "email_bcc": email_bcc, + "email_subject": email_subject, + "email_body": email_body, + "email_body_enable_html": email_body_enable_html, + "fallback_email_body": fallback_email_body, + "seed": seed, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ImageSegmentationPageStatusResponse, + EmailFaceInpaintingPageOutput, parse_obj_as( - type_=ImageSegmentationPageStatusResponse, # type: ignore + type_=EmailFaceInpaintingPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3compare_ai_upscalers_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> CompareUpscalerPageStatusResponse: + async def image_from_web_search( + self, + *, + search_query: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, 
+ seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GoogleImageGenPageOutput: """ Parameters ---------- + search_query : str + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + prompt_strength : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - CompareUpscalerPageStatusResponse + GoogleImageGenPageOutput Successful Response Examples @@ -3028,42 +8051,137 @@ async def post_v3compare_ai_upscalers_async( async def main() -> None: - await client.post_v3compare_ai_upscalers_async() + await client.image_from_web_search( + search_query="search_query", + text_prompt="text_prompt", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async", + "v3/GoogleImageGen/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "search_query": search_query, + "text_prompt": text_prompt, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - CompareUpscalerPageStatusResponse, + GoogleImageGenPageOutput, parse_obj_as( - type_=CompareUpscalerPageStatusResponse, # type: ignore + type_=GoogleImageGenPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, 
body=_response_json) - async def post_v3chyron_plant_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> ChyronPlantPageStatusResponse: + async def remove_background( + self, + *, + input_image: core.File, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None, + mask_threshold: typing.Optional[float] = None, + rect_persepective_transform: typing.Optional[bool] = None, + reflection_opacity: typing.Optional[float] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ImageSegmentationPageOutput: """ Parameters ---------- + input_image : core.File + See core.File for more documentation + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel] + + mask_threshold : typing.Optional[float] + + rect_persepective_transform : typing.Optional[bool] + + reflection_opacity : typing.Optional[float] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
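# A usage sketch for `remove_background`, which posts to v3/ImageSegmentation/async as shown
# below. `AsyncGooey` and the binary file handle for `input_image` are illustrative assumptions.
import asyncio

from gooey import AsyncGooey  # assumed async client export


async def main() -> None:
    client = AsyncGooey(api_key="YOUR_API_KEY")
    result = await client.remove_background(
        input_image=open("portrait.jpg", "rb"),  # required core.File upload
        mask_threshold=0.5,  # optional segmentation tuning parameter from the signature above
    )
    print(result)


asyncio.run(main())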
Returns ------- - ChyronPlantPageStatusResponse + ImageSegmentationPageOutput Successful Response Examples @@ -3078,42 +8196,124 @@ async def post_v3chyron_plant_async( async def main() -> None: - await client.post_v3chyron_plant_async() + await client.remove_background() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/async", + "v3/ImageSegmentation/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "selected_model": selected_model, + "mask_threshold": mask_threshold, + "rect_persepective_transform": rect_persepective_transform, + "reflection_opacity": reflection_opacity, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "settings": settings, + }, + files={ + "input_image": input_image, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ChyronPlantPageStatusResponse, + ImageSegmentationPageOutput, parse_obj_as( - type_=ChyronPlantPageStatusResponse, # type: ignore + type_=ImageSegmentationPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3letter_writer_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> LetterWriterPageStatusResponse: + async def upscale( + self, + *, + scale: int, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[UpscaleRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_image: typing.Optional[core.File] = None, + input_video: typing.Optional[core.File] = None, + selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None, + selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareUpscalerPageOutput: """ Parameters ---------- + scale : int + The final upsampling scale of the image + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[UpscaleRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_image : typing.Optional[core.File] + See core.File for more documentation + + input_video : typing.Optional[core.File] + See core.File for more documentation + + selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] + + selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + + settings : 
typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - LetterWriterPageStatusResponse + CompareUpscalerPageOutput Successful Response Examples @@ -3128,42 +8328,111 @@ async def post_v3letter_writer_async( async def main() -> None: - await client.post_v3letter_writer_async() + await client.upscale( + scale=1, + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/LetterWriter/async", + "v3/compare-ai-upscalers/async", method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "scale": scale, + "selected_models": selected_models, + "selected_bg_model": selected_bg_model, + "settings": settings, + }, + files={ + "input_image": input_image, + "input_video": input_video, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - LetterWriterPageStatusResponse, + CompareUpscalerPageOutput, parse_obj_as( - type_=LetterWriterPageStatusResponse, # type: ignore + type_=CompareUpscalerPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3embeddings_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> EmbeddingsPageStatusResponse: + async def embed( + self, + *, + texts: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EmbeddingsPageOutput: """ Parameters ---------- + texts : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
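# A usage sketch for the `embed` method documented above; `texts` is the only required argument.
# `AsyncGooey` is an assumed name for the async client export.
import asyncio

from gooey import AsyncGooey  # assumed async client export


async def main() -> None:
    client = AsyncGooey(api_key="YOUR_API_KEY")
    result = await client.embed(
        texts=["hello world", "bonjour le monde"],
    )
    print(result)


asyncio.run(main())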
Returns ------- - EmbeddingsPageStatusResponse + EmbeddingsPageOutput Successful Response Examples @@ -3178,7 +8447,9 @@ async def post_v3embeddings_async( async def main() -> None: - await client.post_v3embeddings_async() + await client.embed( + texts=["texts"], + ) asyncio.run(main()) @@ -3186,34 +8457,165 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/embeddings/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - EmbeddingsPageStatusResponse, + EmbeddingsPageOutput, parse_obj_as( - type_=EmbeddingsPageStatusResponse, # type: ignore + type_=EmbeddingsPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3related_qna_maker_doc_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnADocPageStatusResponse: + async def seo_people_also_ask_doc( + self, + *, + search_query: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: 
typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> RelatedQnADocPageOutput: """ Parameters ---------- + search_query : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] + + documents : typing.Optional[typing.Sequence[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] + + citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
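# A usage sketch for `seo_people_also_ask_doc` (v3/related-qna-maker-doc/async). Only
# `search_query` is required; `documents` takes a sequence of strings, assumed here to be
# URLs of documents to search over. `AsyncGooey` is an assumed name for the async client export.
import asyncio

from gooey import AsyncGooey  # assumed async client export


async def main() -> None:
    client = AsyncGooey(api_key="YOUR_API_KEY")
    result = await client.seo_people_also_ask_doc(
        search_query="how do I renew my passport?",
        documents=["https://example.com/passport-faq.pdf"],  # assumption: URLs to source documents
        max_references=3,
    )
    print(result)


asyncio.run(main())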
Returns ------- - RelatedQnADocPageStatusResponse + RelatedQnADocPageOutput Successful Response Examples @@ -3228,7 +8630,9 @@ async def post_v3related_qna_maker_doc_async( async def main() -> None: - await client.post_v3related_qna_maker_doc_async() + await client.seo_people_also_ask_doc( + search_query="search_query", + ) asyncio.run(main()) @@ -3236,17 +8640,79 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/related-qna-maker-doc/async", method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - RelatedQnADocPageStatusResponse, + RelatedQnADocPageOutput, parse_obj_as( - type_=RelatedQnADocPageStatusResponse, # type: ignore + type_=RelatedQnADocPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/gooey/copilot_for_your_enterprise/__init__.py b/src/gooey/copilot_for_your_enterprise/__init__.py new file mode 100644 index 0000000..70d45c7 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/__init__.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .types import ( + AsyncVideoBotsRequestAsrModel, + AsyncVideoBotsRequestCitationStyle, + AsyncVideoBotsRequestEmbeddingModel, + AsyncVideoBotsRequestFunctionsItem, + AsyncVideoBotsRequestFunctionsItemTrigger, + AsyncVideoBotsRequestLipsyncModel, + AsyncVideoBotsRequestMessagesItem, + AsyncVideoBotsRequestMessagesItemContent, + AsyncVideoBotsRequestMessagesItemContentItem, + AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl, + AsyncVideoBotsRequestMessagesItemContentItem_Text, + AsyncVideoBotsRequestMessagesItemRole, + AsyncVideoBotsRequestOpenaiTtsModel, + AsyncVideoBotsRequestOpenaiVoiceName, + AsyncVideoBotsRequestResponseFormatType, + AsyncVideoBotsRequestSadtalkerSettings, + AsyncVideoBotsRequestSadtalkerSettingsPreprocess, + AsyncVideoBotsRequestSelectedModel, + AsyncVideoBotsRequestTranslationModel, + AsyncVideoBotsRequestTtsProvider, +) + +__all__ = [ + "AsyncVideoBotsRequestAsrModel", + "AsyncVideoBotsRequestCitationStyle", + "AsyncVideoBotsRequestEmbeddingModel", + "AsyncVideoBotsRequestFunctionsItem", + "AsyncVideoBotsRequestFunctionsItemTrigger", + "AsyncVideoBotsRequestLipsyncModel", + "AsyncVideoBotsRequestMessagesItem", + "AsyncVideoBotsRequestMessagesItemContent", + "AsyncVideoBotsRequestMessagesItemContentItem", + "AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl", + "AsyncVideoBotsRequestMessagesItemContentItem_Text", + "AsyncVideoBotsRequestMessagesItemRole", + "AsyncVideoBotsRequestOpenaiTtsModel", + "AsyncVideoBotsRequestOpenaiVoiceName", + "AsyncVideoBotsRequestResponseFormatType", + "AsyncVideoBotsRequestSadtalkerSettings", + "AsyncVideoBotsRequestSadtalkerSettingsPreprocess", + "AsyncVideoBotsRequestSelectedModel", + "AsyncVideoBotsRequestTranslationModel", + "AsyncVideoBotsRequestTtsProvider", +] diff --git a/src/gooey/copilot_for_your_enterprise/client.py b/src/gooey/copilot_for_your_enterprise/client.py new file mode 100644 index 0000000..f2b6d71 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/client.py @@ -0,0 +1,745 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from .types.async_video_bots_request_functions_item import AsyncVideoBotsRequestFunctionsItem +from .. 
import core +from .types.async_video_bots_request_messages_item import AsyncVideoBotsRequestMessagesItem +from .types.async_video_bots_request_selected_model import AsyncVideoBotsRequestSelectedModel +from .types.async_video_bots_request_embedding_model import AsyncVideoBotsRequestEmbeddingModel +from .types.async_video_bots_request_citation_style import AsyncVideoBotsRequestCitationStyle +from .types.async_video_bots_request_asr_model import AsyncVideoBotsRequestAsrModel +from .types.async_video_bots_request_translation_model import AsyncVideoBotsRequestTranslationModel +from .types.async_video_bots_request_lipsync_model import AsyncVideoBotsRequestLipsyncModel +from .types.async_video_bots_request_response_format_type import AsyncVideoBotsRequestResponseFormatType +from .types.async_video_bots_request_tts_provider import AsyncVideoBotsRequestTtsProvider +from .types.async_video_bots_request_openai_voice_name import AsyncVideoBotsRequestOpenaiVoiceName +from .types.async_video_bots_request_openai_tts_model import AsyncVideoBotsRequestOpenaiTtsModel +from .types.async_video_bots_request_sadtalker_settings import AsyncVideoBotsRequestSadtalkerSettings +from ..types.run_settings import RunSettings +from ..core.request_options import RequestOptions +from ..types.video_bots_page_output import VideoBotsPageOutput +from ..core.pydantic_utilities import parse_obj_as +from ..errors.payment_required_error import PaymentRequiredError +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..errors.too_many_requests_error import TooManyRequestsError +from ..types.generic_error_response import GenericErrorResponse +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
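# Two hedged notes on the generated module above, since neither is stated explicitly in the patch:
# 1) OMIT is a sentinel default for optional body fields; passing `omit=OMIT` to the HTTP client
#    presumably lets unset fields be dropped from the payload instead of being sent as null.
# 2) The class below is reached through the top-level client, as in the docstring example further
#    down; a minimal sketch with illustrative values:
from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")
result = client.copilot_for_your_enterprise.async_video_bots(
    input_prompt="Summarise the attached handbook for a new hire",  # illustrative prompt
    documents=[open("handbook.pdf", "rb")],  # core.File list; a binary handle is assumed to work
)
print(result)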
+ + +class CopilotForYourEnterpriseClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def async_video_bots( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[AsyncVideoBotsRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_prompt: typing.Optional[str] = None, + input_audio: typing.Optional[str] = None, + input_images: typing.Optional[typing.List[core.File]] = None, + input_documents: typing.Optional[typing.List[core.File]] = None, + doc_extract_url: typing.Optional[str] = None, + messages: typing.Optional[typing.List[AsyncVideoBotsRequestMessagesItem]] = None, + bot_script: typing.Optional[str] = None, + selected_model: typing.Optional[AsyncVideoBotsRequestSelectedModel] = None, + document_model: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + keyword_instructions: typing.Optional[str] = None, + documents: typing.Optional[typing.List[core.File]] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + embedding_model: typing.Optional[AsyncVideoBotsRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + citation_style: typing.Optional[AsyncVideoBotsRequestCitationStyle] = None, + use_url_shortener: typing.Optional[bool] = None, + asr_model: typing.Optional[AsyncVideoBotsRequestAsrModel] = None, + asr_language: typing.Optional[str] = None, + translation_model: typing.Optional[AsyncVideoBotsRequestTranslationModel] = None, + user_language: typing.Optional[str] = None, + input_glossary_document: typing.Optional[core.File] = None, + output_glossary_document: typing.Optional[core.File] = None, + lipsync_model: typing.Optional[AsyncVideoBotsRequestLipsyncModel] = None, + tools: typing.Optional[typing.List[typing.Literal["json_to_pdf"]]] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[AsyncVideoBotsRequestResponseFormatType] = None, + tts_provider: typing.Optional[AsyncVideoBotsRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[AsyncVideoBotsRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[AsyncVideoBotsRequestOpenaiTtsModel] = None, + input_face: typing.Optional[core.File] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + 
face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[AsyncVideoBotsRequestSadtalkerSettings] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> VideoBotsPageOutput: + """ + Parameters + ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[AsyncVideoBotsRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + input_audio : typing.Optional[str] + + input_images : typing.Optional[typing.List[core.File]] + See core.File for more documentation + + input_documents : typing.Optional[typing.List[core.File]] + See core.File for more documentation + + doc_extract_url : typing.Optional[str] + Select a workflow to extract text from documents and images. + + messages : typing.Optional[typing.List[AsyncVideoBotsRequestMessagesItem]] + + bot_script : typing.Optional[str] + + selected_model : typing.Optional[AsyncVideoBotsRequestSelectedModel] + + document_model : typing.Optional[str] + When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + keyword_instructions : typing.Optional[str] + + documents : typing.Optional[typing.List[core.File]] + See core.File for more documentation + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[AsyncVideoBotsRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + citation_style : typing.Optional[AsyncVideoBotsRequestCitationStyle] + + use_url_shortener : typing.Optional[bool] + + asr_model : typing.Optional[AsyncVideoBotsRequestAsrModel] + Choose a model to transcribe incoming audio messages to text. + + asr_language : typing.Optional[str] + Choose a language to transcribe incoming audio messages to text. + + translation_model : typing.Optional[AsyncVideoBotsRequestTranslationModel] + + user_language : typing.Optional[str] + Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. + + input_glossary_document : typing.Optional[core.File] + See core.File for more documentation + + output_glossary_document : typing.Optional[core.File] + See core.File for more documentation + + lipsync_model : typing.Optional[AsyncVideoBotsRequestLipsyncModel] + + tools : typing.Optional[typing.List[typing.Literal["json_to_pdf"]]] + Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). 
+ + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[AsyncVideoBotsRequestResponseFormatType] + + tts_provider : typing.Optional[AsyncVideoBotsRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[AsyncVideoBotsRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[AsyncVideoBotsRequestOpenaiTtsModel] + + input_face : typing.Optional[core.File] + See core.File for more documentation + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[AsyncVideoBotsRequestSadtalkerSettings] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + VideoBotsPageOutput + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.copilot_for_your_enterprise.async_video_bots() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/video-bots/async", + method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "input_audio": input_audio, + "doc_extract_url": doc_extract_url, + "messages": messages, + "bot_script": bot_script, + "selected_model": selected_model, + "document_model": document_model, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "keyword_instructions": keyword_instructions, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "citation_style": citation_style, + "use_url_shortener": use_url_shortener, + "asr_model": asr_model, + "asr_language": asr_language, + "translation_model": translation_model, + "user_language": user_language, + "lipsync_model": lipsync_model, + "tools": tools, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + 
"elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "settings": settings, + }, + files={ + "input_images": input_images, + "input_documents": input_documents, + "documents": documents, + "input_glossary_document": input_glossary_document, + "output_glossary_document": output_glossary_document, + "input_face": input_face, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + VideoBotsPageOutput, + parse_obj_as( + type_=VideoBotsPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncCopilotForYourEnterpriseClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def async_video_bots( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[AsyncVideoBotsRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_prompt: typing.Optional[str] = None, + input_audio: typing.Optional[str] = None, + input_images: typing.Optional[typing.List[core.File]] = None, + input_documents: typing.Optional[typing.List[core.File]] = None, + doc_extract_url: typing.Optional[str] = None, + messages: typing.Optional[typing.List[AsyncVideoBotsRequestMessagesItem]] = None, + bot_script: typing.Optional[str] = None, + selected_model: typing.Optional[AsyncVideoBotsRequestSelectedModel] = None, + document_model: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + keyword_instructions: typing.Optional[str] = None, + documents: typing.Optional[typing.List[core.File]] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + embedding_model: typing.Optional[AsyncVideoBotsRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + citation_style: typing.Optional[AsyncVideoBotsRequestCitationStyle] = None, + use_url_shortener: 
typing.Optional[bool] = None, + asr_model: typing.Optional[AsyncVideoBotsRequestAsrModel] = None, + asr_language: typing.Optional[str] = None, + translation_model: typing.Optional[AsyncVideoBotsRequestTranslationModel] = None, + user_language: typing.Optional[str] = None, + input_glossary_document: typing.Optional[core.File] = None, + output_glossary_document: typing.Optional[core.File] = None, + lipsync_model: typing.Optional[AsyncVideoBotsRequestLipsyncModel] = None, + tools: typing.Optional[typing.List[typing.Literal["json_to_pdf"]]] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[AsyncVideoBotsRequestResponseFormatType] = None, + tts_provider: typing.Optional[AsyncVideoBotsRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[AsyncVideoBotsRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[AsyncVideoBotsRequestOpenaiTtsModel] = None, + input_face: typing.Optional[core.File] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[AsyncVideoBotsRequestSadtalkerSettings] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> VideoBotsPageOutput: + """ + Parameters + ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[AsyncVideoBotsRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + input_audio : typing.Optional[str] + + input_images : typing.Optional[typing.List[core.File]] + See core.File for more documentation + + input_documents : typing.Optional[typing.List[core.File]] + See core.File for more documentation + + doc_extract_url : typing.Optional[str] + Select a workflow to extract text from documents and images. + + messages : typing.Optional[typing.List[AsyncVideoBotsRequestMessagesItem]] + + bot_script : typing.Optional[str] + + selected_model : typing.Optional[AsyncVideoBotsRequestSelectedModel] + + document_model : typing.Optional[str] + When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? 
(via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + keyword_instructions : typing.Optional[str] + + documents : typing.Optional[typing.List[core.File]] + See core.File for more documentation + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[AsyncVideoBotsRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + citation_style : typing.Optional[AsyncVideoBotsRequestCitationStyle] + + use_url_shortener : typing.Optional[bool] + + asr_model : typing.Optional[AsyncVideoBotsRequestAsrModel] + Choose a model to transcribe incoming audio messages to text. + + asr_language : typing.Optional[str] + Choose a language to transcribe incoming audio messages to text. + + translation_model : typing.Optional[AsyncVideoBotsRequestTranslationModel] + + user_language : typing.Optional[str] + Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. + + input_glossary_document : typing.Optional[core.File] + See core.File for more documentation + + output_glossary_document : typing.Optional[core.File] + See core.File for more documentation + + lipsync_model : typing.Optional[AsyncVideoBotsRequestLipsyncModel] + + tools : typing.Optional[typing.List[typing.Literal["json_to_pdf"]]] + Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). 
+ + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[AsyncVideoBotsRequestResponseFormatType] + + tts_provider : typing.Optional[AsyncVideoBotsRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[AsyncVideoBotsRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[AsyncVideoBotsRequestOpenaiTtsModel] + + input_face : typing.Optional[core.File] + See core.File for more documentation + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[AsyncVideoBotsRequestSadtalkerSettings] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + VideoBotsPageOutput + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.copilot_for_your_enterprise.async_video_bots() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/video-bots/async", + method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "input_audio": input_audio, + "doc_extract_url": doc_extract_url, + "messages": messages, + "bot_script": bot_script, + "selected_model": selected_model, + "document_model": document_model, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "keyword_instructions": keyword_instructions, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "citation_style": citation_style, + "use_url_shortener": use_url_shortener, + "asr_model": asr_model, + "asr_language": asr_language, + "translation_model": translation_model, + "user_language": user_language, + "lipsync_model": lipsync_model, + "tools": tools, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + 
"bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "settings": settings, + }, + files={ + "input_images": input_images, + "input_documents": input_documents, + "documents": documents, + "input_glossary_document": input_glossary_document, + "output_glossary_document": output_glossary_document, + "input_face": input_face, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + VideoBotsPageOutput, + parse_obj_as( + type_=VideoBotsPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/copilot_for_your_enterprise/types/__init__.py b/src/gooey/copilot_for_your_enterprise/types/__init__.py new file mode 100644 index 0000000..23fe00a --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/__init__.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .async_video_bots_request_asr_model import AsyncVideoBotsRequestAsrModel +from .async_video_bots_request_citation_style import AsyncVideoBotsRequestCitationStyle +from .async_video_bots_request_embedding_model import AsyncVideoBotsRequestEmbeddingModel +from .async_video_bots_request_functions_item import AsyncVideoBotsRequestFunctionsItem +from .async_video_bots_request_functions_item_trigger import AsyncVideoBotsRequestFunctionsItemTrigger +from .async_video_bots_request_lipsync_model import AsyncVideoBotsRequestLipsyncModel +from .async_video_bots_request_messages_item import AsyncVideoBotsRequestMessagesItem +from .async_video_bots_request_messages_item_content import AsyncVideoBotsRequestMessagesItemContent +from .async_video_bots_request_messages_item_content_item import ( + AsyncVideoBotsRequestMessagesItemContentItem, + AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl, + AsyncVideoBotsRequestMessagesItemContentItem_Text, +) +from .async_video_bots_request_messages_item_role import AsyncVideoBotsRequestMessagesItemRole +from .async_video_bots_request_openai_tts_model import AsyncVideoBotsRequestOpenaiTtsModel +from .async_video_bots_request_openai_voice_name import AsyncVideoBotsRequestOpenaiVoiceName +from .async_video_bots_request_response_format_type import AsyncVideoBotsRequestResponseFormatType +from .async_video_bots_request_sadtalker_settings import AsyncVideoBotsRequestSadtalkerSettings +from .async_video_bots_request_sadtalker_settings_preprocess import AsyncVideoBotsRequestSadtalkerSettingsPreprocess +from .async_video_bots_request_selected_model import AsyncVideoBotsRequestSelectedModel +from .async_video_bots_request_translation_model import AsyncVideoBotsRequestTranslationModel +from .async_video_bots_request_tts_provider import AsyncVideoBotsRequestTtsProvider + +__all__ = [ + "AsyncVideoBotsRequestAsrModel", + "AsyncVideoBotsRequestCitationStyle", + "AsyncVideoBotsRequestEmbeddingModel", + "AsyncVideoBotsRequestFunctionsItem", + "AsyncVideoBotsRequestFunctionsItemTrigger", + "AsyncVideoBotsRequestLipsyncModel", + "AsyncVideoBotsRequestMessagesItem", + "AsyncVideoBotsRequestMessagesItemContent", + "AsyncVideoBotsRequestMessagesItemContentItem", + "AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl", + "AsyncVideoBotsRequestMessagesItemContentItem_Text", + "AsyncVideoBotsRequestMessagesItemRole", + "AsyncVideoBotsRequestOpenaiTtsModel", + "AsyncVideoBotsRequestOpenaiVoiceName", + "AsyncVideoBotsRequestResponseFormatType", + "AsyncVideoBotsRequestSadtalkerSettings", + "AsyncVideoBotsRequestSadtalkerSettingsPreprocess", + "AsyncVideoBotsRequestSelectedModel", + "AsyncVideoBotsRequestTranslationModel", + "AsyncVideoBotsRequestTtsProvider", +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_asr_model.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_asr_model.py new file mode 100644 index 0000000..99ea71d --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
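# Accepted values for the asr_model parameter of async_video_bots: the speech-recognition
# engines that can transcribe incoming audio messages.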
+ +import typing + +AsyncVideoBotsRequestAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_citation_style.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_citation_style.py new file mode 100644 index 0000000..5d6c4b0 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_citation_style.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncVideoBotsRequestCitationStyle = typing.Union[ + typing.Literal[ + "number", + "title", + "url", + "symbol", + "markdown", + "html", + "slack_mrkdwn", + "plaintext", + "number_markdown", + "number_html", + "number_slack_mrkdwn", + "number_plaintext", + "symbol_markdown", + "symbol_html", + "symbol_slack_mrkdwn", + "symbol_plaintext", + ], + typing.Any, +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_embedding_model.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_embedding_model.py new file mode 100644 index 0000000..6bc8751 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncVideoBotsRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item.py new file mode 100644 index 0000000..c6399d0 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.pydantic_utilities import UniversalBaseModel +from .async_video_bots_request_functions_item_trigger import AsyncVideoBotsRequestFunctionsItemTrigger +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class AsyncVideoBotsRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: AsyncVideoBotsRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item_trigger.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item_trigger.py new file mode 100644 index 0000000..807a5d7 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
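# Controls when a linked function runs: "pre" (before the recipe) or "post" (after it).
#
# A hypothetical usage sketch combining the models above (the function URL and prompt are
# illustrative placeholders, not values taken from this SDK):
#
#   from gooey import Gooey
#   from gooey.copilot_for_your_enterprise.types import AsyncVideoBotsRequestFunctionsItem
#
#   client = Gooey(api_key="YOUR_API_KEY")
#   client.copilot_for_your_enterprise.async_video_bots(
#       input_prompt="Summarize the uploaded report",
#       functions=[
#           AsyncVideoBotsRequestFunctionsItem(url="https://example.com/my-function", trigger="post"),
#       ],
#   )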
+ +import typing + +AsyncVideoBotsRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_lipsync_model.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_lipsync_model.py new file mode 100644 index 0000000..c8646cd --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_lipsync_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncVideoBotsRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item.py new file mode 100644 index 0000000..e7692a0 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.pydantic_utilities import UniversalBaseModel +from .async_video_bots_request_messages_item_role import AsyncVideoBotsRequestMessagesItemRole +from .async_video_bots_request_messages_item_content import AsyncVideoBotsRequestMessagesItemContent +import typing +from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AsyncVideoBotsRequestMessagesItem(UniversalBaseModel): + role: AsyncVideoBotsRequestMessagesItemRole + content: AsyncVideoBotsRequestMessagesItemContent + display_name: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content.py new file mode 100644 index 0000000..e346909 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .async_video_bots_request_messages_item_content_item import AsyncVideoBotsRequestMessagesItemContentItem + +AsyncVideoBotsRequestMessagesItemContent = typing.Union[str, typing.List[AsyncVideoBotsRequestMessagesItemContentItem]] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content_item.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content_item.py new file mode 100644 index 0000000..cf537ff --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content_item.py @@ -0,0 +1,41 @@ +# This file was auto-generated by Fern from our API Definition. 
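# One entry in a message's content list: either a text chunk ("text") or an image reference ("image_url").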
+ +from __future__ import annotations +from ...core.pydantic_utilities import UniversalBaseModel +import typing +from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ...types.image_url import ImageUrl + + +class AsyncVideoBotsRequestMessagesItemContentItem_Text(UniversalBaseModel): + type: typing.Literal["text"] = "text" + text: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl(UniversalBaseModel): + type: typing.Literal["image_url"] = "image_url" + image_url: typing.Optional[ImageUrl] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +AsyncVideoBotsRequestMessagesItemContentItem = typing.Union[ + AsyncVideoBotsRequestMessagesItemContentItem_Text, AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_role.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_role.py new file mode 100644 index 0000000..3cede39 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_role.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncVideoBotsRequestMessagesItemRole = typing.Union[typing.Literal["user", "system", "assistant"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_tts_model.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_tts_model.py new file mode 100644 index 0000000..5b4c798 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_tts_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncVideoBotsRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_voice_name.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_voice_name.py new file mode 100644 index 0000000..5ebcb9c --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_voice_name.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncVideoBotsRequestOpenaiVoiceName = typing.Union[ + typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_response_format_type.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_response_format_type.py new file mode 100644 index 0000000..875ddbf --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
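# Whether the LLM should reply with free-form text or a JSON object.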
+ +import typing + +AsyncVideoBotsRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings.py new file mode 100644 index 0000000..90c64e2 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.pydantic_utilities import UniversalBaseModel +import typing +from .async_video_bots_request_sadtalker_settings_preprocess import AsyncVideoBotsRequestSadtalkerSettingsPreprocess +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class AsyncVideoBotsRequestSadtalkerSettings(UniversalBaseModel): + still: typing.Optional[bool] = None + preprocess: typing.Optional[AsyncVideoBotsRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None) + """ + SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping. + """ + + pose_style: typing.Optional[int] = pydantic.Field(default=None) + """ + Random seed 0-45 inclusive that affects how the pose is animated. + """ + + expression_scale: typing.Optional[float] = pydantic.Field(default=None) + """ + Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot. + """ + + ref_eyeblink: typing.Optional[str] = None + ref_pose: typing.Optional[str] = None + input_yaw: typing.Optional[typing.List[int]] = None + input_pitch: typing.Optional[typing.List[int]] = None + input_roll: typing.Optional[typing.List[int]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings_preprocess.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings_preprocess.py new file mode 100644 index 0000000..587424a --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings_preprocess.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncVideoBotsRequestSadtalkerSettingsPreprocess = typing.Union[ + typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_selected_model.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_selected_model.py new file mode 100644 index 0000000..8e05ce3 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
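# The language models the copilot can run on (GPT-4o, Llama 3, Gemini 1.5, Claude 3, PaLM 2, and others).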
+ +import typing + +AsyncVideoBotsRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_translation_model.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_translation_model.py new file mode 100644 index 0000000..35d4b1a --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncVideoBotsRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_tts_provider.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_tts_provider.py new file mode 100644 index 0000000..f492ea1 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_tts_provider.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncVideoBotsRequestTtsProvider = typing.Union[ + typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any +] diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py index e4e31f6..4bd75e2 100644 --- a/src/gooey/core/client_wrapper.py +++ b/src/gooey/core/client_wrapper.py @@ -22,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "gooeyai", - "X-Fern-SDK-Version": "0.0.1-beta11", + "X-Fern-SDK-Version": "0.0.1-beta12", } headers["Authorization"] = f"Bearer {self._get_api_key()}" return headers diff --git a/src/gooey/errors/__init__.py b/src/gooey/errors/__init__.py index 5d9271d..19ea9c4 100644 --- a/src/gooey/errors/__init__.py +++ b/src/gooey/errors/__init__.py @@ -1,6 +1,7 @@ # This file was auto-generated by Fern from our API Definition. from .payment_required_error import PaymentRequiredError +from .too_many_requests_error import TooManyRequestsError from .unprocessable_entity_error import UnprocessableEntityError -__all__ = ["PaymentRequiredError", "UnprocessableEntityError"] +__all__ = ["PaymentRequiredError", "TooManyRequestsError", "UnprocessableEntityError"] diff --git a/src/gooey/errors/too_many_requests_error.py b/src/gooey/errors/too_many_requests_error.py new file mode 100644 index 0000000..81d358c --- /dev/null +++ b/src/gooey/errors/too_many_requests_error.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. 
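# Raised for HTTP 429 responses; the parsed GenericErrorResponse body is attached to the error.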
+ +from ..core.api_error import ApiError +from ..types.generic_error_response import GenericErrorResponse + + +class TooManyRequestsError(ApiError): + def __init__(self, body: GenericErrorResponse): + super().__init__(status_code=429, body=body) diff --git a/src/gooey/evaluator/__init__.py b/src/gooey/evaluator/__init__.py new file mode 100644 index 0000000..d4ba20f --- /dev/null +++ b/src/gooey/evaluator/__init__.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import ( + BulkEvalPageRequestAggFunctionsItem, + BulkEvalPageRequestAggFunctionsItemFunction, + BulkEvalPageRequestEvalPromptsItem, + BulkEvalPageRequestFunctionsItem, + BulkEvalPageRequestFunctionsItemTrigger, + BulkEvalPageRequestResponseFormatType, + BulkEvalPageRequestSelectedModel, +) + +__all__ = [ + "BulkEvalPageRequestAggFunctionsItem", + "BulkEvalPageRequestAggFunctionsItemFunction", + "BulkEvalPageRequestEvalPromptsItem", + "BulkEvalPageRequestFunctionsItem", + "BulkEvalPageRequestFunctionsItemTrigger", + "BulkEvalPageRequestResponseFormatType", + "BulkEvalPageRequestSelectedModel", +] diff --git a/src/gooey/evaluator/client.py b/src/gooey/evaluator/client.py new file mode 100644 index 0000000..d771f1d --- /dev/null +++ b/src/gooey/evaluator/client.py @@ -0,0 +1,342 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from .types.bulk_eval_page_request_functions_item import BulkEvalPageRequestFunctionsItem +from .types.bulk_eval_page_request_eval_prompts_item import BulkEvalPageRequestEvalPromptsItem +from .types.bulk_eval_page_request_agg_functions_item import BulkEvalPageRequestAggFunctionsItem +from .types.bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel +from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType +from ..types.run_settings import RunSettings +from ..core.request_options import RequestOptions +from ..types.bulk_eval_page_output import BulkEvalPageOutput +from ..core.pydantic_utilities import parse_obj_as +from ..errors.payment_required_error import PaymentRequiredError +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..errors.too_many_requests_error import TooManyRequestsError +from ..types.generic_error_response import GenericErrorResponse +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
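# OMIT is a sentinel distinct from None, letting the serializer drop unset optional fields
# from the request body (see omit=OMIT in the calls below).
#
# A hypothetical, slightly fuller call than the docstring example; the CSV URL and model
# name are illustrative placeholders, not values taken from this SDK:
#
#   from gooey import Gooey
#
#   client = Gooey(api_key="YOUR_API_KEY")
#   client.evaluator.async_bulk_eval(
#       documents=["https://example.com/eval_samples.csv"],
#       selected_model="gpt_4_o",
#       sampling_temperature=0.2,
#   )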
+ + +class EvaluatorClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def async_bulk_eval( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + eval_prompts: typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] = OMIT, + agg_functions: typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] = OMIT, + selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> BulkEvalPageOutput: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + Upload or link to a CSV or Google Sheet that contains your sample input data. + For example, for Copilot, these would be sample questions; for Art QR Code, these would be pairs of image descriptions and URLs. + Remember to include header names in your CSV too. + + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + eval_prompts : typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] + + Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. + _The `columns` dictionary can be used to reference the spreadsheet columns._ + + + agg_functions : typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] + + Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). + + + selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + BulkEvalPageOutput + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.evaluator.async_bulk_eval( + documents=["documents"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/bulk-eval/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "eval_prompts": eval_prompts, + "agg_functions": agg_functions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + BulkEvalPageOutput, + parse_obj_as( + type_=BulkEvalPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncEvaluatorClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def async_bulk_eval( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + eval_prompts: typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] = OMIT, + agg_functions: typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] = OMIT, + selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> BulkEvalPageOutput: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + Upload or link to a CSV or Google Sheet that contains your sample input data. + For example, for Copilot, these would be sample questions; for Art QR Code, these would be pairs of image descriptions and URLs. + Remember to include header names in your CSV too. 
+ + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + eval_prompts : typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] + + Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. + _The `columns` dictionary can be used to reference the spreadsheet columns._ + + + agg_functions : typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] + + Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). + + + selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + BulkEvalPageOutput + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.evaluator.async_bulk_eval( + documents=["documents"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/bulk-eval/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "eval_prompts": eval_prompts, + "agg_functions": agg_functions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + BulkEvalPageOutput, + parse_obj_as( + type_=BulkEvalPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/evaluator/types/__init__.py b/src/gooey/evaluator/types/__init__.py new file mode 100644 index 0000000..87bb267 --- /dev/null +++ 
b/src/gooey/evaluator/types/__init__.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from .bulk_eval_page_request_agg_functions_item import BulkEvalPageRequestAggFunctionsItem +from .bulk_eval_page_request_agg_functions_item_function import BulkEvalPageRequestAggFunctionsItemFunction +from .bulk_eval_page_request_eval_prompts_item import BulkEvalPageRequestEvalPromptsItem +from .bulk_eval_page_request_functions_item import BulkEvalPageRequestFunctionsItem +from .bulk_eval_page_request_functions_item_trigger import BulkEvalPageRequestFunctionsItemTrigger +from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType +from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel + +__all__ = [ + "BulkEvalPageRequestAggFunctionsItem", + "BulkEvalPageRequestAggFunctionsItemFunction", + "BulkEvalPageRequestEvalPromptsItem", + "BulkEvalPageRequestFunctionsItem", + "BulkEvalPageRequestFunctionsItemTrigger", + "BulkEvalPageRequestResponseFormatType", + "BulkEvalPageRequestSelectedModel", +] diff --git a/src/gooey/types/bulk_eval_page_request_agg_functions_item.py b/src/gooey/evaluator/types/bulk_eval_page_request_agg_functions_item.py similarity index 85% rename from src/gooey/types/bulk_eval_page_request_agg_functions_item.py rename to src/gooey/evaluator/types/bulk_eval_page_request_agg_functions_item.py index b7f5cd7..391a7ae 100644 --- a/src/gooey/types/bulk_eval_page_request_agg_functions_item.py +++ b/src/gooey/evaluator/types/bulk_eval_page_request_agg_functions_item.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ..core.pydantic_utilities import UniversalBaseModel +from ...core.pydantic_utilities import UniversalBaseModel import typing from .bulk_eval_page_request_agg_functions_item_function import BulkEvalPageRequestAggFunctionsItemFunction -from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ...core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/gooey/types/bulk_eval_page_request_agg_functions_item_function.py b/src/gooey/evaluator/types/bulk_eval_page_request_agg_functions_item_function.py similarity index 100% rename from src/gooey/types/bulk_eval_page_request_agg_functions_item_function.py rename to src/gooey/evaluator/types/bulk_eval_page_request_agg_functions_item_function.py diff --git a/src/gooey/types/bulk_eval_page_request_eval_prompts_item.py b/src/gooey/evaluator/types/bulk_eval_page_request_eval_prompts_item.py similarity index 81% rename from src/gooey/types/bulk_eval_page_request_eval_prompts_item.py rename to src/gooey/evaluator/types/bulk_eval_page_request_eval_prompts_item.py index 7d3956d..8bbc6b0 100644 --- a/src/gooey/types/bulk_eval_page_request_eval_prompts_item.py +++ b/src/gooey/evaluator/types/bulk_eval_page_request_eval_prompts_item.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. 
-from ..core.pydantic_utilities import UniversalBaseModel -from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ...core.pydantic_utilities import UniversalBaseModel +from ...core.pydantic_utilities import IS_PYDANTIC_V2 import typing import pydantic diff --git a/src/gooey/types/bulk_eval_page_request_functions_item.py b/src/gooey/evaluator/types/bulk_eval_page_request_functions_item.py similarity index 86% rename from src/gooey/types/bulk_eval_page_request_functions_item.py rename to src/gooey/evaluator/types/bulk_eval_page_request_functions_item.py index b89037c..26ac5c6 100644 --- a/src/gooey/types/bulk_eval_page_request_functions_item.py +++ b/src/gooey/evaluator/types/bulk_eval_page_request_functions_item.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ..core.pydantic_utilities import UniversalBaseModel +from ...core.pydantic_utilities import UniversalBaseModel from .bulk_eval_page_request_functions_item_trigger import BulkEvalPageRequestFunctionsItemTrigger import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ...core.pydantic_utilities import IS_PYDANTIC_V2 import typing diff --git a/src/gooey/types/bulk_eval_page_request_functions_item_trigger.py b/src/gooey/evaluator/types/bulk_eval_page_request_functions_item_trigger.py similarity index 100% rename from src/gooey/types/bulk_eval_page_request_functions_item_trigger.py rename to src/gooey/evaluator/types/bulk_eval_page_request_functions_item_trigger.py diff --git a/src/gooey/types/bulk_eval_page_request_response_format_type.py b/src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py similarity index 100% rename from src/gooey/types/bulk_eval_page_request_response_format_type.py rename to src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py diff --git a/src/gooey/types/bulk_eval_page_request_selected_model.py b/src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py similarity index 100% rename from src/gooey/types/bulk_eval_page_request_selected_model.py rename to src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py diff --git a/src/gooey/functions/__init__.py b/src/gooey/functions/__init__.py new file mode 100644 index 0000000..f3ea265 --- /dev/null +++ b/src/gooey/functions/__init__.py @@ -0,0 +1,2 @@ +# This file was auto-generated by Fern from our API Definition. + diff --git a/src/gooey/functions/client.py b/src/gooey/functions/client.py new file mode 100644 index 0000000..0047ff0 --- /dev/null +++ b/src/gooey/functions/client.py @@ -0,0 +1,231 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..types.run_settings import RunSettings +from ..core.request_options import RequestOptions +from ..types.functions_page_output import FunctionsPageOutput +from ..core.pydantic_utilities import parse_obj_as +from ..errors.payment_required_error import PaymentRequiredError +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..errors.too_many_requests_error import TooManyRequestsError +from ..types.generic_error_response import GenericErrorResponse +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
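# A hypothetical sketch of running a snippet with bound variables; only the code and
# variables parameters come from the signature below, and the JS body itself is a placeholder:
#
#   from gooey import Gooey
#
#   client = Gooey(api_key="YOUR_API_KEY")
#   client.functions.async_functions(
#       code="return variables.a + variables.b;",
#       variables={"a": 1, "b": 2},
#   )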
+ + +class FunctionsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def async_functions( + self, + *, + example_id: typing.Optional[str] = None, + code: typing.Optional[str] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> FunctionsPageOutput: + """ + Parameters + ---------- + example_id : typing.Optional[str] + + code : typing.Optional[str] + The JS code to be executed. + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used in the code + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + FunctionsPageOutput + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.functions.async_functions() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/functions/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "code": code, + "variables": variables, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + FunctionsPageOutput, + parse_obj_as( + type_=FunctionsPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncFunctionsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def async_functions( + self, + *, + example_id: typing.Optional[str] = None, + code: typing.Optional[str] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> FunctionsPageOutput: + """ + Parameters + ---------- + example_id : typing.Optional[str] + + code : typing.Optional[str] + The JS code to be executed. + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used in the code + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + FunctionsPageOutput + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.functions.async_functions() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/functions/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "code": code, + "variables": variables, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + FunctionsPageOutput, + parse_obj_as( + type_=FunctionsPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/lip_syncing/__init__.py b/src/gooey/lip_syncing/__init__.py new file mode 100644 index 0000000..b03755d --- /dev/null +++ b/src/gooey/lip_syncing/__init__.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import ( + AsyncLipsyncRequestFunctionsItem, + AsyncLipsyncRequestFunctionsItemTrigger, + AsyncLipsyncRequestSadtalkerSettings, + AsyncLipsyncRequestSadtalkerSettingsPreprocess, + AsyncLipsyncRequestSelectedModel, +) + +__all__ = [ + "AsyncLipsyncRequestFunctionsItem", + "AsyncLipsyncRequestFunctionsItemTrigger", + "AsyncLipsyncRequestSadtalkerSettings", + "AsyncLipsyncRequestSadtalkerSettingsPreprocess", + "AsyncLipsyncRequestSelectedModel", +] diff --git a/src/gooey/lip_syncing/client.py b/src/gooey/lip_syncing/client.py new file mode 100644 index 0000000..73f492c --- /dev/null +++ b/src/gooey/lip_syncing/client.py @@ -0,0 +1,305 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from .types.async_lipsync_request_functions_item import AsyncLipsyncRequestFunctionsItem +from .. 
import core +from .types.async_lipsync_request_sadtalker_settings import AsyncLipsyncRequestSadtalkerSettings +from .types.async_lipsync_request_selected_model import AsyncLipsyncRequestSelectedModel +from ..types.run_settings import RunSettings +from ..core.request_options import RequestOptions +from ..types.lipsync_page_output import LipsyncPageOutput +from ..core.pydantic_utilities import parse_obj_as +from ..errors.payment_required_error import PaymentRequiredError +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..errors.too_many_requests_error import TooManyRequestsError +from ..types.generic_error_response import GenericErrorResponse +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class LipSyncingClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def async_lipsync( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[AsyncLipsyncRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_face: typing.Optional[core.File] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[AsyncLipsyncRequestSadtalkerSettings] = None, + selected_model: typing.Optional[AsyncLipsyncRequestSelectedModel] = None, + input_audio: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> LipsyncPageOutput: + """ + Parameters + ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[AsyncLipsyncRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_face : typing.Optional[core.File] + See core.File for more documentation + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[AsyncLipsyncRequestSadtalkerSettings] + + selected_model : typing.Optional[AsyncLipsyncRequestSelectedModel] + + input_audio : typing.Optional[core.File] + See core.File for more documentation + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + LipsyncPageOutput + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.lip_syncing.async_lipsync() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/Lipsync/async", + method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, + "settings": settings, + }, + files={ + "input_face": input_face, + "input_audio": input_audio, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + LipsyncPageOutput, + parse_obj_as( + type_=LipsyncPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncLipSyncingClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def async_lipsync( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[AsyncLipsyncRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_face: typing.Optional[core.File] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[AsyncLipsyncRequestSadtalkerSettings] = None, + selected_model: typing.Optional[AsyncLipsyncRequestSelectedModel] = None, + input_audio: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> LipsyncPageOutput: + """ + Parameters + ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[AsyncLipsyncRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_face : typing.Optional[core.File] + See core.File for more documentation + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[AsyncLipsyncRequestSadtalkerSettings] + + selected_model : typing.Optional[AsyncLipsyncRequestSelectedModel] + + 
input_audio : typing.Optional[core.File] + See core.File for more documentation + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + LipsyncPageOutput + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.lip_syncing.async_lipsync() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/Lipsync/async", + method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, + "settings": settings, + }, + files={ + "input_face": input_face, + "input_audio": input_audio, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + LipsyncPageOutput, + parse_obj_as( + type_=LipsyncPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/lip_syncing/types/__init__.py b/src/gooey/lip_syncing/types/__init__.py new file mode 100644 index 0000000..bb6b073 --- /dev/null +++ b/src/gooey/lip_syncing/types/__init__.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +from .async_lipsync_request_functions_item import AsyncLipsyncRequestFunctionsItem +from .async_lipsync_request_functions_item_trigger import AsyncLipsyncRequestFunctionsItemTrigger +from .async_lipsync_request_sadtalker_settings import AsyncLipsyncRequestSadtalkerSettings +from .async_lipsync_request_sadtalker_settings_preprocess import AsyncLipsyncRequestSadtalkerSettingsPreprocess +from .async_lipsync_request_selected_model import AsyncLipsyncRequestSelectedModel + +__all__ = [ + "AsyncLipsyncRequestFunctionsItem", + "AsyncLipsyncRequestFunctionsItemTrigger", + "AsyncLipsyncRequestSadtalkerSettings", + "AsyncLipsyncRequestSadtalkerSettingsPreprocess", + "AsyncLipsyncRequestSelectedModel", +] diff --git a/src/gooey/lip_syncing/types/async_lipsync_request_functions_item.py b/src/gooey/lip_syncing/types/async_lipsync_request_functions_item.py new file mode 100644 index 0000000..6046d25 --- /dev/null +++ b/src/gooey/lip_syncing/types/async_lipsync_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
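
# A hedged sketch of calling the LipSyncingClient defined earlier in this patch
# with file inputs. It assumes `core.File` accepts an open binary file object
# (as in other Fern-generated SDKs); the file names and padding value are
# placeholders, and "Wav2Lip" is one of the AsyncLipsyncRequestSelectedModel
# literals shown later in this patch.

from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")
with open("face.png", "rb") as face, open("speech.wav", "rb") as audio:
    result = client.lip_syncing.async_lipsync(
        input_face=face,            # sent as multipart form data
        input_audio=audio,
        selected_model="Wav2Lip",
        face_padding_top=10,
    )
print(result)
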
+ +from ...core.pydantic_utilities import UniversalBaseModel +from .async_lipsync_request_functions_item_trigger import AsyncLipsyncRequestFunctionsItemTrigger +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class AsyncLipsyncRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: AsyncLipsyncRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/lip_syncing/types/async_lipsync_request_functions_item_trigger.py b/src/gooey/lip_syncing/types/async_lipsync_request_functions_item_trigger.py new file mode 100644 index 0000000..e329956 --- /dev/null +++ b/src/gooey/lip_syncing/types/async_lipsync_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncLipsyncRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings.py b/src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings.py new file mode 100644 index 0000000..4540fe5 --- /dev/null +++ b/src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.pydantic_utilities import UniversalBaseModel +import typing +from .async_lipsync_request_sadtalker_settings_preprocess import AsyncLipsyncRequestSadtalkerSettingsPreprocess +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class AsyncLipsyncRequestSadtalkerSettings(UniversalBaseModel): + still: typing.Optional[bool] = None + preprocess: typing.Optional[AsyncLipsyncRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None) + """ + SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping. + """ + + pose_style: typing.Optional[int] = pydantic.Field(default=None) + """ + Random seed 0-45 inclusive that affects how the pose is animated. + """ + + expression_scale: typing.Optional[float] = pydantic.Field(default=None) + """ + Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot. 
+ """ + + ref_eyeblink: typing.Optional[str] = None + ref_pose: typing.Optional[str] = None + input_yaw: typing.Optional[typing.List[int]] = None + input_pitch: typing.Optional[typing.List[int]] = None + input_roll: typing.Optional[typing.List[int]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings_preprocess.py b/src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings_preprocess.py new file mode 100644 index 0000000..21e1a4a --- /dev/null +++ b/src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings_preprocess.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncLipsyncRequestSadtalkerSettingsPreprocess = typing.Union[ + typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any +] diff --git a/src/gooey/lip_syncing/types/async_lipsync_request_selected_model.py b/src/gooey/lip_syncing/types/async_lipsync_request_selected_model.py new file mode 100644 index 0000000..12da6d9 --- /dev/null +++ b/src/gooey/lip_syncing/types/async_lipsync_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsyncLipsyncRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/smart_gpt/__init__.py b/src/gooey/smart_gpt/__init__.py new file mode 100644 index 0000000..e5f60f5 --- /dev/null +++ b/src/gooey/smart_gpt/__init__.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import ( + SmartGptPageRequestFunctionsItem, + SmartGptPageRequestFunctionsItemTrigger, + SmartGptPageRequestResponseFormatType, + SmartGptPageRequestSelectedModel, +) + +__all__ = [ + "SmartGptPageRequestFunctionsItem", + "SmartGptPageRequestFunctionsItemTrigger", + "SmartGptPageRequestResponseFormatType", + "SmartGptPageRequestSelectedModel", +] diff --git a/src/gooey/smart_gpt/client.py b/src/gooey/smart_gpt/client.py new file mode 100644 index 0000000..b100c7d --- /dev/null +++ b/src/gooey/smart_gpt/client.py @@ -0,0 +1,324 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from .types.smart_gpt_page_request_functions_item import SmartGptPageRequestFunctionsItem +from .types.smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel +from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType +from ..types.run_settings import RunSettings +from ..core.request_options import RequestOptions +from ..types.smart_gpt_page_output import SmartGptPageOutput +from ..core.pydantic_utilities import parse_obj_as +from ..errors.payment_required_error import PaymentRequiredError +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..errors.too_many_requests_error import TooManyRequestsError +from ..types.generic_error_response import GenericErrorResponse +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class SmartGptClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def async_smart_gpt( + self, + *, + input_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + cot_prompt: typing.Optional[str] = OMIT, + reflexion_prompt: typing.Optional[str] = OMIT, + dera_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> SmartGptPageOutput: + """ + Parameters + ---------- + input_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + cot_prompt : typing.Optional[str] + + reflexion_prompt : typing.Optional[str] + + dera_prompt : typing.Optional[str] + + selected_model : typing.Optional[SmartGptPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + SmartGptPageOutput + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.smart_gpt.async_smart_gpt( + input_prompt="input_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/SmartGPT/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "cot_prompt": cot_prompt, + "reflexion_prompt": reflexion_prompt, + "dera_prompt": dera_prompt, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SmartGptPageOutput, + parse_obj_as( + type_=SmartGptPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncSmartGptClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def async_smart_gpt( + self, + *, + input_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + cot_prompt: typing.Optional[str] = OMIT, + reflexion_prompt: typing.Optional[str] = OMIT, + dera_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> SmartGptPageOutput: + """ + Parameters + ---------- + input_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + cot_prompt : typing.Optional[str] + + reflexion_prompt : typing.Optional[str] + + dera_prompt : typing.Optional[str] + + selected_model : typing.Optional[SmartGptPageRequestSelectedModel] + + 
avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SmartGptPageOutput + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.smart_gpt.async_smart_gpt( + input_prompt="input_prompt", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/SmartGPT/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "cot_prompt": cot_prompt, + "reflexion_prompt": reflexion_prompt, + "dera_prompt": dera_prompt, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SmartGptPageOutput, + parse_obj_as( + type_=SmartGptPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/smart_gpt/types/__init__.py b/src/gooey/smart_gpt/types/__init__.py new file mode 100644 index 0000000..f866a87 --- /dev/null +++ b/src/gooey/smart_gpt/types/__init__.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .smart_gpt_page_request_functions_item import SmartGptPageRequestFunctionsItem +from .smart_gpt_page_request_functions_item_trigger import SmartGptPageRequestFunctionsItemTrigger +from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType +from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel + +__all__ = [ + "SmartGptPageRequestFunctionsItem", + "SmartGptPageRequestFunctionsItemTrigger", + "SmartGptPageRequestResponseFormatType", + "SmartGptPageRequestSelectedModel", +] diff --git a/src/gooey/types/smart_gpt_page_request_functions_item.py b/src/gooey/smart_gpt/types/smart_gpt_page_request_functions_item.py similarity index 86% rename from src/gooey/types/smart_gpt_page_request_functions_item.py rename to src/gooey/smart_gpt/types/smart_gpt_page_request_functions_item.py index edb2c83..03c5e72 100644 --- a/src/gooey/types/smart_gpt_page_request_functions_item.py +++ b/src/gooey/smart_gpt/types/smart_gpt_page_request_functions_item.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ..core.pydantic_utilities import UniversalBaseModel +from ...core.pydantic_utilities import UniversalBaseModel from .smart_gpt_page_request_functions_item_trigger import SmartGptPageRequestFunctionsItemTrigger import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ...core.pydantic_utilities import IS_PYDANTIC_V2 import typing diff --git a/src/gooey/types/smart_gpt_page_request_functions_item_trigger.py b/src/gooey/smart_gpt/types/smart_gpt_page_request_functions_item_trigger.py similarity index 100% rename from src/gooey/types/smart_gpt_page_request_functions_item_trigger.py rename to src/gooey/smart_gpt/types/smart_gpt_page_request_functions_item_trigger.py diff --git a/src/gooey/types/smart_gpt_page_request_response_format_type.py b/src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py similarity index 100% rename from src/gooey/types/smart_gpt_page_request_response_format_type.py rename to src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py diff --git a/src/gooey/types/smart_gpt_page_request_selected_model.py b/src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py similarity index 100% rename from src/gooey/types/smart_gpt_page_request_selected_model.py rename to src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py index 2f81897..1691490 100644 --- a/src/gooey/types/__init__.py +++ b/src/gooey/types/__init__.py @@ -20,15 +20,9 @@ from .balance_response import BalanceResponse from .bot_broadcast_filters import BotBroadcastFilters from .bulk_eval_page_output import BulkEvalPageOutput -from .bulk_eval_page_request import BulkEvalPageRequest -from .bulk_eval_page_request_agg_functions_item import BulkEvalPageRequestAggFunctionsItem -from .bulk_eval_page_request_agg_functions_item_function import BulkEvalPageRequestAggFunctionsItemFunction -from .bulk_eval_page_request_eval_prompts_item import BulkEvalPageRequestEvalPromptsItem -from .bulk_eval_page_request_functions_item import BulkEvalPageRequestFunctionsItem -from .bulk_eval_page_request_functions_item_trigger import BulkEvalPageRequestFunctionsItemTrigger -from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType -from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel from .bulk_eval_page_status_response import 
BulkEvalPageStatusResponse +from .bulk_run_request_functions_item import BulkRunRequestFunctionsItem +from .bulk_run_request_functions_item_trigger import BulkRunRequestFunctionsItemTrigger from .bulk_runner_page_output import BulkRunnerPageOutput from .bulk_runner_page_request import BulkRunnerPageRequest from .bulk_runner_page_request_functions_item import BulkRunnerPageRequestFunctionsItem @@ -45,14 +39,12 @@ from .chyron_plant_page_request_functions_item_trigger import ChyronPlantPageRequestFunctionsItemTrigger from .chyron_plant_page_status_response import ChyronPlantPageStatusResponse from .compare_llm_page_output import CompareLlmPageOutput -from .compare_llm_page_request import CompareLlmPageRequest from .compare_llm_page_request_functions_item import CompareLlmPageRequestFunctionsItem from .compare_llm_page_request_functions_item_trigger import CompareLlmPageRequestFunctionsItemTrigger from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem from .compare_llm_page_status_response import CompareLlmPageStatusResponse from .compare_text2img_page_output import CompareText2ImgPageOutput -from .compare_text2img_page_request import CompareText2ImgPageRequest from .compare_text2img_page_request_functions_item import CompareText2ImgPageRequestFunctionsItem from .compare_text2img_page_request_functions_item_trigger import CompareText2ImgPageRequestFunctionsItemTrigger from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler @@ -77,7 +69,6 @@ from .conversation_start import ConversationStart from .create_stream_response import CreateStreamResponse from .deforum_sd_page_output import DeforumSdPageOutput -from .deforum_sd_page_request import DeforumSdPageRequest from .deforum_sd_page_request_animation_prompts_item import DeforumSdPageRequestAnimationPromptsItem from .deforum_sd_page_request_functions_item import DeforumSdPageRequestFunctionsItem from .deforum_sd_page_request_functions_item_trigger import DeforumSdPageRequestFunctionsItemTrigger @@ -92,7 +83,6 @@ from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel from .doc_extract_page_status_response import DocExtractPageStatusResponse from .doc_search_page_output import DocSearchPageOutput -from .doc_search_page_request import DocSearchPageRequest from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel from .doc_search_page_request_functions_item import DocSearchPageRequestFunctionsItem @@ -109,8 +99,12 @@ from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel from .doc_summary_page_status_response import DocSummaryPageStatusResponse +from .doc_summary_request_functions_item import DocSummaryRequestFunctionsItem +from .doc_summary_request_functions_item_trigger import DocSummaryRequestFunctionsItemTrigger +from .doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType +from .doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel +from .doc_summary_request_selected_model import DocSummaryRequestSelectedModel from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput -from .email_face_inpainting_page_request import 
EmailFaceInpaintingPageRequest from .email_face_inpainting_page_request_functions_item import EmailFaceInpaintingPageRequestFunctionsItem from .email_face_inpainting_page_request_functions_item_trigger import ( EmailFaceInpaintingPageRequestFunctionsItemTrigger, @@ -118,7 +112,6 @@ from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse from .embeddings_page_output import EmbeddingsPageOutput -from .embeddings_page_request import EmbeddingsPageRequest from .embeddings_page_request_functions_item import EmbeddingsPageRequestFunctionsItem from .embeddings_page_request_functions_item_trigger import EmbeddingsPageRequestFunctionsItemTrigger from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel @@ -132,12 +125,10 @@ from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse from .final_response import FinalResponse from .functions_page_output import FunctionsPageOutput -from .functions_page_request import FunctionsPageRequest from .functions_page_status_response import FunctionsPageStatusResponse from .generic_error_response import GenericErrorResponse from .generic_error_response_detail import GenericErrorResponseDetail from .google_gpt_page_output import GoogleGptPageOutput -from .google_gpt_page_request import GoogleGptPageRequest from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel from .google_gpt_page_request_functions_item import GoogleGptPageRequestFunctionsItem from .google_gpt_page_request_functions_item_trigger import GoogleGptPageRequestFunctionsItemTrigger @@ -145,7 +136,6 @@ from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel from .google_gpt_page_status_response import GoogleGptPageStatusResponse from .google_image_gen_page_output import GoogleImageGenPageOutput -from .google_image_gen_page_request import GoogleImageGenPageRequest from .google_image_gen_page_request_functions_item import GoogleImageGenPageRequestFunctionsItem from .google_image_gen_page_request_functions_item_trigger import GoogleImageGenPageRequestFunctionsItemTrigger from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel @@ -192,6 +182,14 @@ from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse +from .lipsync_tts_request_functions_item import LipsyncTtsRequestFunctionsItem +from .lipsync_tts_request_functions_item_trigger import LipsyncTtsRequestFunctionsItemTrigger +from .lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel +from .lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName +from .lipsync_tts_request_sadtalker_settings import LipsyncTtsRequestSadtalkerSettings +from .lipsync_tts_request_sadtalker_settings_preprocess import LipsyncTtsRequestSadtalkerSettingsPreprocess +from .lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel +from .lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider from .llm_tools import LlmTools from .message_part import MessagePart from .object_inpainting_page_output import ObjectInpaintingPageOutput @@ -200,6 +198,12 @@ from .object_inpainting_page_request_functions_item_trigger import 
ObjectInpaintingPageRequestFunctionsItemTrigger from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse +from .portrait_request_functions_item import PortraitRequestFunctionsItem +from .portrait_request_functions_item_trigger import PortraitRequestFunctionsItemTrigger +from .portrait_request_selected_model import PortraitRequestSelectedModel +from .product_image_request_functions_item import ProductImageRequestFunctionsItem +from .product_image_request_functions_item_trigger import ProductImageRequestFunctionsItemTrigger +from .product_image_request_selected_model import ProductImageRequestSelectedModel from .prompt_tree_node import PromptTreeNode from .prompt_tree_node_prompt import PromptTreeNodePrompt from .qr_code_generator_page_output import QrCodeGeneratorPageOutput @@ -216,13 +220,19 @@ ) from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel from .qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse +from .qr_code_request_functions_item import QrCodeRequestFunctionsItem +from .qr_code_request_functions_item_trigger import QrCodeRequestFunctionsItemTrigger +from .qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem +from .qr_code_request_qr_code_vcard import QrCodeRequestQrCodeVcard +from .qr_code_request_scheduler import QrCodeRequestScheduler +from .qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem +from .qr_code_request_selected_model import QrCodeRequestSelectedModel from .recipe_function import RecipeFunction from .recipe_function_trigger import RecipeFunctionTrigger from .recipe_run_state import RecipeRunState from .related_doc_search_response import RelatedDocSearchResponse from .related_google_gpt_response import RelatedGoogleGptResponse from .related_qn_a_doc_page_output import RelatedQnADocPageOutput -from .related_qn_a_doc_page_request import RelatedQnADocPageRequest from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel from .related_qn_a_doc_page_request_functions_item import RelatedQnADocPageRequestFunctionsItem @@ -232,13 +242,20 @@ from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse from .related_qn_a_page_output import RelatedQnAPageOutput -from .related_qn_a_page_request import RelatedQnAPageRequest from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel from .related_qn_a_page_request_functions_item import RelatedQnAPageRequestFunctionsItem from .related_qn_a_page_request_functions_item_trigger import RelatedQnAPageRequestFunctionsItemTrigger from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel from .related_qn_a_page_status_response import RelatedQnAPageStatusResponse +from .remix_image_request_functions_item import RemixImageRequestFunctionsItem +from .remix_image_request_functions_item_trigger import RemixImageRequestFunctionsItemTrigger +from .remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel 
+from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem +from .remix_image_request_selected_model import RemixImageRequestSelectedModel +from .remove_background_request_functions_item import RemoveBackgroundRequestFunctionsItem +from .remove_background_request_functions_item_trigger import RemoveBackgroundRequestFunctionsItemTrigger +from .remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel from .reply_button import ReplyButton from .response_model import ResponseModel from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery @@ -250,34 +267,35 @@ from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess from .search_reference import SearchReference from .seo_summary_page_output import SeoSummaryPageOutput -from .seo_summary_page_request import SeoSummaryPageRequest from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel from .seo_summary_page_status_response import SeoSummaryPageStatusResponse from .serp_search_location import SerpSearchLocation from .serp_search_type import SerpSearchType from .smart_gpt_page_output import SmartGptPageOutput -from .smart_gpt_page_request import SmartGptPageRequest -from .smart_gpt_page_request_functions_item import SmartGptPageRequestFunctionsItem -from .smart_gpt_page_request_functions_item_trigger import SmartGptPageRequestFunctionsItemTrigger -from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType -from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel from .smart_gpt_page_status_response import SmartGptPageStatusResponse from .social_lookup_email_page_output import SocialLookupEmailPageOutput -from .social_lookup_email_page_request import SocialLookupEmailPageRequest from .social_lookup_email_page_request_functions_item import SocialLookupEmailPageRequestFunctionsItem from .social_lookup_email_page_request_functions_item_trigger import SocialLookupEmailPageRequestFunctionsItemTrigger from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse +from .speech_recognition_request_functions_item import SpeechRecognitionRequestFunctionsItem +from .speech_recognition_request_functions_item_trigger import SpeechRecognitionRequestFunctionsItemTrigger +from .speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat +from .speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel +from .speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel from .stream_error import StreamError +from .synthesize_data_request_functions_item import SynthesizeDataRequestFunctionsItem +from .synthesize_data_request_functions_item_trigger import SynthesizeDataRequestFunctionsItemTrigger +from .synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType +from .synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel +from .synthesize_data_request_selected_model import SynthesizeDataRequestSelectedModel from .text2audio_page_output import Text2AudioPageOutput -from 
.text2audio_page_request import Text2AudioPageRequest from .text2audio_page_request_functions_item import Text2AudioPageRequestFunctionsItem from .text2audio_page_request_functions_item_trigger import Text2AudioPageRequestFunctionsItemTrigger from .text2audio_page_status_response import Text2AudioPageStatusResponse from .text_to_speech_page_output import TextToSpeechPageOutput -from .text_to_speech_page_request import TextToSpeechPageRequest from .text_to_speech_page_request_functions_item import TextToSpeechPageRequestFunctionsItem from .text_to_speech_page_request_functions_item_trigger import TextToSpeechPageRequestFunctionsItemTrigger from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel @@ -285,12 +303,18 @@ from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse from .training_data_model import TrainingDataModel +from .translate_request_functions_item import TranslateRequestFunctionsItem +from .translate_request_functions_item_trigger import TranslateRequestFunctionsItemTrigger +from .translate_request_selected_model import TranslateRequestSelectedModel from .translation_page_output import TranslationPageOutput from .translation_page_request import TranslationPageRequest from .translation_page_request_functions_item import TranslationPageRequestFunctionsItem from .translation_page_request_functions_item_trigger import TranslationPageRequestFunctionsItemTrigger from .translation_page_request_selected_model import TranslationPageRequestSelectedModel from .translation_page_status_response import TranslationPageStatusResponse +from .upscale_request_functions_item import UpscaleRequestFunctionsItem +from .upscale_request_functions_item_trigger import UpscaleRequestFunctionsItemTrigger +from .upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem from .validation_error import ValidationError from .validation_error_loc_item import ValidationErrorLocItem from .vcard import Vcard @@ -343,15 +367,9 @@ "BalanceResponse", "BotBroadcastFilters", "BulkEvalPageOutput", - "BulkEvalPageRequest", - "BulkEvalPageRequestAggFunctionsItem", - "BulkEvalPageRequestAggFunctionsItemFunction", - "BulkEvalPageRequestEvalPromptsItem", - "BulkEvalPageRequestFunctionsItem", - "BulkEvalPageRequestFunctionsItemTrigger", - "BulkEvalPageRequestResponseFormatType", - "BulkEvalPageRequestSelectedModel", "BulkEvalPageStatusResponse", + "BulkRunRequestFunctionsItem", + "BulkRunRequestFunctionsItemTrigger", "BulkRunnerPageOutput", "BulkRunnerPageRequest", "BulkRunnerPageRequestFunctionsItem", @@ -368,14 +386,12 @@ "ChyronPlantPageRequestFunctionsItemTrigger", "ChyronPlantPageStatusResponse", "CompareLlmPageOutput", - "CompareLlmPageRequest", "CompareLlmPageRequestFunctionsItem", "CompareLlmPageRequestFunctionsItemTrigger", "CompareLlmPageRequestResponseFormatType", "CompareLlmPageRequestSelectedModelsItem", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", - "CompareText2ImgPageRequest", "CompareText2ImgPageRequestFunctionsItem", "CompareText2ImgPageRequestFunctionsItemTrigger", "CompareText2ImgPageRequestScheduler", @@ -398,7 +414,6 @@ "ConversationStart", "CreateStreamResponse", "DeforumSdPageOutput", - "DeforumSdPageRequest", "DeforumSdPageRequestAnimationPromptsItem", "DeforumSdPageRequestFunctionsItem", "DeforumSdPageRequestFunctionsItemTrigger", @@ -413,7 +428,6 @@ "DocExtractPageRequestSelectedModel", 
"DocExtractPageStatusResponse", "DocSearchPageOutput", - "DocSearchPageRequest", "DocSearchPageRequestCitationStyle", "DocSearchPageRequestEmbeddingModel", "DocSearchPageRequestFunctionsItem", @@ -430,14 +444,17 @@ "DocSummaryPageRequestSelectedAsrModel", "DocSummaryPageRequestSelectedModel", "DocSummaryPageStatusResponse", + "DocSummaryRequestFunctionsItem", + "DocSummaryRequestFunctionsItemTrigger", + "DocSummaryRequestResponseFormatType", + "DocSummaryRequestSelectedAsrModel", + "DocSummaryRequestSelectedModel", "EmailFaceInpaintingPageOutput", - "EmailFaceInpaintingPageRequest", "EmailFaceInpaintingPageRequestFunctionsItem", "EmailFaceInpaintingPageRequestFunctionsItemTrigger", "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageStatusResponse", "EmbeddingsPageOutput", - "EmbeddingsPageRequest", "EmbeddingsPageRequestFunctionsItem", "EmbeddingsPageRequestFunctionsItemTrigger", "EmbeddingsPageRequestSelectedModel", @@ -451,12 +468,10 @@ "FaceInpaintingPageStatusResponse", "FinalResponse", "FunctionsPageOutput", - "FunctionsPageRequest", "FunctionsPageStatusResponse", "GenericErrorResponse", "GenericErrorResponseDetail", "GoogleGptPageOutput", - "GoogleGptPageRequest", "GoogleGptPageRequestEmbeddingModel", "GoogleGptPageRequestFunctionsItem", "GoogleGptPageRequestFunctionsItemTrigger", @@ -464,7 +479,6 @@ "GoogleGptPageRequestSelectedModel", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", - "GoogleImageGenPageRequest", "GoogleImageGenPageRequestFunctionsItem", "GoogleImageGenPageRequestFunctionsItemTrigger", "GoogleImageGenPageRequestSelectedModel", @@ -511,6 +525,14 @@ "LipsyncTtsPageRequestSelectedModel", "LipsyncTtsPageRequestTtsProvider", "LipsyncTtsPageStatusResponse", + "LipsyncTtsRequestFunctionsItem", + "LipsyncTtsRequestFunctionsItemTrigger", + "LipsyncTtsRequestOpenaiTtsModel", + "LipsyncTtsRequestOpenaiVoiceName", + "LipsyncTtsRequestSadtalkerSettings", + "LipsyncTtsRequestSadtalkerSettingsPreprocess", + "LipsyncTtsRequestSelectedModel", + "LipsyncTtsRequestTtsProvider", "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", @@ -519,6 +541,12 @@ "ObjectInpaintingPageRequestFunctionsItemTrigger", "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", + "PortraitRequestFunctionsItem", + "PortraitRequestFunctionsItemTrigger", + "PortraitRequestSelectedModel", + "ProductImageRequestFunctionsItem", + "ProductImageRequestFunctionsItemTrigger", + "ProductImageRequestSelectedModel", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", @@ -531,13 +559,19 @@ "QrCodeGeneratorPageRequestSelectedControlnetModelItem", "QrCodeGeneratorPageRequestSelectedModel", "QrCodeGeneratorPageStatusResponse", + "QrCodeRequestFunctionsItem", + "QrCodeRequestFunctionsItemTrigger", + "QrCodeRequestImagePromptControlnetModelsItem", + "QrCodeRequestQrCodeVcard", + "QrCodeRequestScheduler", + "QrCodeRequestSelectedControlnetModelItem", + "QrCodeRequestSelectedModel", "RecipeFunction", "RecipeFunctionTrigger", "RecipeRunState", "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", - "RelatedQnADocPageRequest", "RelatedQnADocPageRequestCitationStyle", "RelatedQnADocPageRequestEmbeddingModel", "RelatedQnADocPageRequestFunctionsItem", @@ -547,13 +581,20 @@ "RelatedQnADocPageRequestSelectedModel", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", - "RelatedQnAPageRequest", "RelatedQnAPageRequestEmbeddingModel", "RelatedQnAPageRequestFunctionsItem", "RelatedQnAPageRequestFunctionsItemTrigger", 
"RelatedQnAPageRequestResponseFormatType", "RelatedQnAPageRequestSelectedModel", "RelatedQnAPageStatusResponse", + "RemixImageRequestFunctionsItem", + "RemixImageRequestFunctionsItemTrigger", + "RemixImageRequestSelectedControlnetModel", + "RemixImageRequestSelectedControlnetModelItem", + "RemixImageRequestSelectedModel", + "RemoveBackgroundRequestFunctionsItem", + "RemoveBackgroundRequestFunctionsItemTrigger", + "RemoveBackgroundRequestSelectedModel", "ReplyButton", "ResponseModel", "ResponseModelFinalKeywordQuery", @@ -565,34 +606,35 @@ "SadTalkerSettingsPreprocess", "SearchReference", "SeoSummaryPageOutput", - "SeoSummaryPageRequest", "SeoSummaryPageRequestResponseFormatType", "SeoSummaryPageRequestSelectedModel", "SeoSummaryPageStatusResponse", "SerpSearchLocation", "SerpSearchType", "SmartGptPageOutput", - "SmartGptPageRequest", - "SmartGptPageRequestFunctionsItem", - "SmartGptPageRequestFunctionsItemTrigger", - "SmartGptPageRequestResponseFormatType", - "SmartGptPageRequestSelectedModel", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", - "SocialLookupEmailPageRequest", "SocialLookupEmailPageRequestFunctionsItem", "SocialLookupEmailPageRequestFunctionsItemTrigger", "SocialLookupEmailPageRequestResponseFormatType", "SocialLookupEmailPageRequestSelectedModel", "SocialLookupEmailPageStatusResponse", + "SpeechRecognitionRequestFunctionsItem", + "SpeechRecognitionRequestFunctionsItemTrigger", + "SpeechRecognitionRequestOutputFormat", + "SpeechRecognitionRequestSelectedModel", + "SpeechRecognitionRequestTranslationModel", "StreamError", + "SynthesizeDataRequestFunctionsItem", + "SynthesizeDataRequestFunctionsItemTrigger", + "SynthesizeDataRequestResponseFormatType", + "SynthesizeDataRequestSelectedAsrModel", + "SynthesizeDataRequestSelectedModel", "Text2AudioPageOutput", - "Text2AudioPageRequest", "Text2AudioPageRequestFunctionsItem", "Text2AudioPageRequestFunctionsItemTrigger", "Text2AudioPageStatusResponse", "TextToSpeechPageOutput", - "TextToSpeechPageRequest", "TextToSpeechPageRequestFunctionsItem", "TextToSpeechPageRequestFunctionsItemTrigger", "TextToSpeechPageRequestOpenaiTtsModel", @@ -600,12 +642,18 @@ "TextToSpeechPageRequestTtsProvider", "TextToSpeechPageStatusResponse", "TrainingDataModel", + "TranslateRequestFunctionsItem", + "TranslateRequestFunctionsItemTrigger", + "TranslateRequestSelectedModel", "TranslationPageOutput", "TranslationPageRequest", "TranslationPageRequestFunctionsItem", "TranslationPageRequestFunctionsItemTrigger", "TranslationPageRequestSelectedModel", "TranslationPageStatusResponse", + "UpscaleRequestFunctionsItem", + "UpscaleRequestFunctionsItemTrigger", + "UpscaleRequestSelectedModelsItem", "ValidationError", "ValidationErrorLocItem", "Vcard", diff --git a/src/gooey/types/bulk_eval_page_request.py b/src/gooey/types/bulk_eval_page_request.py deleted file mode 100644 index 1c2e417..0000000 --- a/src/gooey/types/bulk_eval_page_request.py +++ /dev/null @@ -1,56 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .bulk_eval_page_request_functions_item import BulkEvalPageRequestFunctionsItem -import pydantic -from .bulk_eval_page_request_eval_prompts_item import BulkEvalPageRequestEvalPromptsItem -from .bulk_eval_page_request_agg_functions_item import BulkEvalPageRequestAggFunctionsItem -from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel -from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class BulkEvalPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[BulkEvalPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - documents: typing.List[str] = pydantic.Field() - """ - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - """ - - eval_prompts: typing.Optional[typing.List[BulkEvalPageRequestEvalPromptsItem]] = pydantic.Field(default=None) - """ - Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. - _The `columns` dictionary can be used to reference the spreadsheet columns._ - """ - - agg_functions: typing.Optional[typing.List[BulkEvalPageRequestAggFunctionsItem]] = pydantic.Field(default=None) - """ - Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - """ - - selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/bulk_run_request_functions_item.py b/src/gooey/types/bulk_run_request_functions_item.py new file mode 100644 index 0000000..fd948fe --- /dev/null +++ b/src/gooey/types/bulk_run_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .bulk_run_request_functions_item_trigger import BulkRunRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class BulkRunRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: BulkRunRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/bulk_run_request_functions_item_trigger.py b/src/gooey/types/bulk_run_request_functions_item_trigger.py new file mode 100644 index 0000000..e932588 --- /dev/null +++ b/src/gooey/types/bulk_run_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +BulkRunRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/compare_llm_page_request.py b/src/gooey/types/compare_llm_page_request.py deleted file mode 100644 index 4665729..0000000 --- a/src/gooey/types/compare_llm_page_request.py +++ /dev/null @@ -1,37 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .compare_llm_page_request_functions_item import CompareLlmPageRequestFunctionsItem -import pydantic -from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem -from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class CompareLlmPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[CompareLlmPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_prompt: typing.Optional[str] = None - selected_models: typing.Optional[typing.List[CompareLlmPageRequestSelectedModelsItem]] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/compare_text2img_page_request.py b/src/gooey/types/compare_text2img_page_request.py deleted file mode 100644 index 7c7a17c..0000000 --- a/src/gooey/types/compare_text2img_page_request.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .compare_text2img_page_request_functions_item import CompareText2ImgPageRequestFunctionsItem -import pydantic -from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem -from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class CompareText2ImgPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[CompareText2ImgPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - text_prompt: str - negative_prompt: typing.Optional[str] = None - output_width: typing.Optional[int] = None - output_height: typing.Optional[int] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - dall_e3quality: typing.Optional[str] = pydantic.Field(alias="dall_e_3_quality", default=None) - dall_e3style: typing.Optional[str] = pydantic.Field(alias="dall_e_3_style", default=None) - guidance_scale: typing.Optional[float] = None - seed: typing.Optional[int] = None - sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) - selected_models: typing.Optional[typing.List[CompareText2ImgPageRequestSelectedModelsItem]] = None - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = None - edit_instruction: typing.Optional[str] = None - image_guidance_scale: typing.Optional[float] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/deforum_sd_page_request.py b/src/gooey/types/deforum_sd_page_request.py deleted file mode 100644 index 0daeb38..0000000 --- a/src/gooey/types/deforum_sd_page_request.py +++ /dev/null @@ -1,41 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .deforum_sd_page_request_functions_item import DeforumSdPageRequestFunctionsItem -import pydantic -from .deforum_sd_page_request_animation_prompts_item import DeforumSdPageRequestAnimationPromptsItem -from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class DeforumSdPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[DeforumSdPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - animation_prompts: typing.List[DeforumSdPageRequestAnimationPromptsItem] - max_frames: typing.Optional[int] = None - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = None - animation_mode: typing.Optional[str] = None - zoom: typing.Optional[str] = None - translation_x: typing.Optional[str] = None - translation_y: typing.Optional[str] = None - rotation3d_x: typing.Optional[str] = pydantic.Field(alias="rotation_3d_x", default=None) - rotation3d_y: typing.Optional[str] = pydantic.Field(alias="rotation_3d_y", default=None) - rotation3d_z: typing.Optional[str] = pydantic.Field(alias="rotation_3d_z", default=None) - fps: typing.Optional[int] = None - seed: typing.Optional[int] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_search_page_request.py b/src/gooey/types/doc_search_page_request.py deleted file mode 100644 index 18ba1e9..0000000 --- a/src/gooey/types/doc_search_page_request.py +++ /dev/null @@ -1,56 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .doc_search_page_request_functions_item import DocSearchPageRequestFunctionsItem -import pydantic -from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery -from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel -from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel -from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle -from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class DocSearchPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[DocSearchPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - search_query: str - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = None - documents: typing.Optional[typing.List[str]] = None - max_references: typing.Optional[int] = None - max_context_words: typing.Optional[int] = None - scroll_jump: typing.Optional[int] = None - doc_extract_url: typing.Optional[str] = None - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = None - dense_weight: typing.Optional[float] = pydantic.Field(default=None) - """ - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - """ - - task_instructions: typing.Optional[str] = None - query_instructions: typing.Optional[str] = None - selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = None - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_summary_request_functions_item.py b/src/gooey/types/doc_summary_request_functions_item.py new file mode 100644 index 0000000..b3b893b --- /dev/null +++ b/src/gooey/types/doc_summary_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .doc_summary_request_functions_item_trigger import DocSummaryRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class DocSummaryRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: DocSummaryRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_summary_request_functions_item_trigger.py b/src/gooey/types/doc_summary_request_functions_item_trigger.py new file mode 100644 index 0000000..482357f --- /dev/null +++ b/src/gooey/types/doc_summary_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSummaryRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/doc_summary_request_response_format_type.py b/src/gooey/types/doc_summary_request_response_format_type.py new file mode 100644 index 0000000..8fabf9b --- /dev/null +++ b/src/gooey/types/doc_summary_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSummaryRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_summary_request_selected_asr_model.py b/src/gooey/types/doc_summary_request_selected_asr_model.py new file mode 100644 index 0000000..8b8a338 --- /dev/null +++ b/src/gooey/types/doc_summary_request_selected_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSummaryRequestSelectedAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/doc_summary_request_selected_model.py b/src/gooey/types/doc_summary_request_selected_model.py new file mode 100644 index 0000000..db13c45 --- /dev/null +++ b/src/gooey/types/doc_summary_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSummaryRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/email_face_inpainting_page_request.py b/src/gooey/types/email_face_inpainting_page_request.py deleted file mode 100644 index d28b82c..0000000 --- a/src/gooey/types/email_face_inpainting_page_request.py +++ /dev/null @@ -1,51 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .email_face_inpainting_page_request_functions_item import EmailFaceInpaintingPageRequestFunctionsItem -import pydantic -from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class EmailFaceInpaintingPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[EmailFaceInpaintingPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - email_address: typing.Optional[str] = None - twitter_handle: typing.Optional[str] = None - text_prompt: str - face_scale: typing.Optional[float] = None - face_pos_x: typing.Optional[float] = None - face_pos_y: typing.Optional[float] = None - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = None - negative_prompt: typing.Optional[str] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - upscale_factor: typing.Optional[float] = None - output_width: typing.Optional[int] = None - output_height: typing.Optional[int] = None - guidance_scale: typing.Optional[float] = None - should_send_email: typing.Optional[bool] = None - email_from: typing.Optional[str] = None - email_cc: typing.Optional[str] = None - email_bcc: typing.Optional[str] = None - email_subject: typing.Optional[str] = None - email_body: typing.Optional[str] = None - email_body_enable_html: typing.Optional[bool] = None - fallback_email_body: typing.Optional[str] = None - seed: typing.Optional[int] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/embeddings_page_request.py b/src/gooey/types/embeddings_page_request.py deleted file mode 100644 index 10e63ff..0000000 --- a/src/gooey/types/embeddings_page_request.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .embeddings_page_request_functions_item import EmbeddingsPageRequestFunctionsItem -import pydantic -from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class EmbeddingsPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[EmbeddingsPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - texts: typing.List[str] - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/google_gpt_page_request.py b/src/gooey/types/google_gpt_page_request.py deleted file mode 100644 index a2dfbae..0000000 --- a/src/gooey/types/google_gpt_page_request.py +++ /dev/null @@ -1,66 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .google_gpt_page_request_functions_item import GoogleGptPageRequestFunctionsItem -import pydantic -from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel -from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel -from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType -from .serp_search_location import SerpSearchLocation -from .serp_search_type import SerpSearchType -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class GoogleGptPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[GoogleGptPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - search_query: str - site_filter: str - task_instructions: typing.Optional[str] = None - query_instructions: typing.Optional[str] = None - selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = None - max_search_urls: typing.Optional[int] = None - max_references: typing.Optional[int] = None - max_context_words: typing.Optional[int] = None - scroll_jump: typing.Optional[int] = None - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = None - dense_weight: typing.Optional[float] = pydantic.Field(default=None) - """ - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- """ - - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = None - serp_search_location: typing.Optional[SerpSearchLocation] = None - scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_location` instead - """ - - serp_search_type: typing.Optional[SerpSearchType] = None - scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_type` instead - """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/google_image_gen_page_request.py b/src/gooey/types/google_image_gen_page_request.py deleted file mode 100644 index d9164fa..0000000 --- a/src/gooey/types/google_image_gen_page_request.py +++ /dev/null @@ -1,46 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .google_image_gen_page_request_functions_item import GoogleImageGenPageRequestFunctionsItem -import pydantic -from .serp_search_location import SerpSearchLocation -from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class GoogleImageGenPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[GoogleImageGenPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - serp_search_location: typing.Optional[SerpSearchLocation] = None - scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_location` instead - """ - - search_query: str - text_prompt: str - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = None - negative_prompt: typing.Optional[str] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - guidance_scale: typing.Optional[float] = None - prompt_strength: typing.Optional[float] = None - sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) - seed: typing.Optional[int] = None - image_guidance_scale: typing.Optional[float] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_tts_request_functions_item.py b/src/gooey/types/lipsync_tts_request_functions_item.py new file mode 100644 index 0000000..566c643 --- /dev/null +++ b/src/gooey/types/lipsync_tts_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .lipsync_tts_request_functions_item_trigger import LipsyncTtsRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class LipsyncTtsRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: LipsyncTtsRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_tts_request_functions_item_trigger.py b/src/gooey/types/lipsync_tts_request_functions_item_trigger.py new file mode 100644 index 0000000..c31400f --- /dev/null +++ b/src/gooey/types/lipsync_tts_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_request_openai_tts_model.py b/src/gooey/types/lipsync_tts_request_openai_tts_model.py new file mode 100644 index 0000000..510dcfb --- /dev/null +++ b/src/gooey/types/lipsync_tts_request_openai_tts_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_request_openai_voice_name.py b/src/gooey/types/lipsync_tts_request_openai_voice_name.py new file mode 100644 index 0000000..7ea601b --- /dev/null +++ b/src/gooey/types/lipsync_tts_request_openai_voice_name.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsRequestOpenaiVoiceName = typing.Union[ + typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any +] diff --git a/src/gooey/types/lipsync_tts_request_sadtalker_settings.py b/src/gooey/types/lipsync_tts_request_sadtalker_settings.py new file mode 100644 index 0000000..e7a4abb --- /dev/null +++ b/src/gooey/types/lipsync_tts_request_sadtalker_settings.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .lipsync_tts_request_sadtalker_settings_preprocess import LipsyncTtsRequestSadtalkerSettingsPreprocess +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class LipsyncTtsRequestSadtalkerSettings(UniversalBaseModel): + still: typing.Optional[bool] = None + preprocess: typing.Optional[LipsyncTtsRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None) + """ + SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping. + """ + + pose_style: typing.Optional[int] = pydantic.Field(default=None) + """ + Random seed 0-45 inclusive that affects how the pose is animated. 
+ """ + + expression_scale: typing.Optional[float] = pydantic.Field(default=None) + """ + Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot. + """ + + ref_eyeblink: typing.Optional[str] = None + ref_pose: typing.Optional[str] = None + input_yaw: typing.Optional[typing.List[int]] = None + input_pitch: typing.Optional[typing.List[int]] = None + input_roll: typing.Optional[typing.List[int]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_tts_request_sadtalker_settings_preprocess.py b/src/gooey/types/lipsync_tts_request_sadtalker_settings_preprocess.py new file mode 100644 index 0000000..10ee6d5 --- /dev/null +++ b/src/gooey/types/lipsync_tts_request_sadtalker_settings_preprocess.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsRequestSadtalkerSettingsPreprocess = typing.Union[ + typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any +] diff --git a/src/gooey/types/lipsync_tts_request_selected_model.py b/src/gooey/types/lipsync_tts_request_selected_model.py new file mode 100644 index 0000000..9ece5a9 --- /dev/null +++ b/src/gooey/types/lipsync_tts_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_request_tts_provider.py b/src/gooey/types/lipsync_tts_request_tts_provider.py new file mode 100644 index 0000000..1a23fe3 --- /dev/null +++ b/src/gooey/types/lipsync_tts_request_tts_provider.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsRequestTtsProvider = typing.Union[ + typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any +] diff --git a/src/gooey/types/portrait_request_functions_item.py b/src/gooey/types/portrait_request_functions_item.py new file mode 100644 index 0000000..697af03 --- /dev/null +++ b/src/gooey/types/portrait_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .portrait_request_functions_item_trigger import PortraitRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class PortraitRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: PortraitRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/portrait_request_functions_item_trigger.py b/src/gooey/types/portrait_request_functions_item_trigger.py new file mode 100644 index 0000000..3311280 --- /dev/null +++ b/src/gooey/types/portrait_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PortraitRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/portrait_request_selected_model.py b/src/gooey/types/portrait_request_selected_model.py new file mode 100644 index 0000000..6c4a5ce --- /dev/null +++ b/src/gooey/types/portrait_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PortraitRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any] diff --git a/src/gooey/types/product_image_request_functions_item.py b/src/gooey/types/product_image_request_functions_item.py new file mode 100644 index 0000000..edf0725 --- /dev/null +++ b/src/gooey/types/product_image_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .product_image_request_functions_item_trigger import ProductImageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class ProductImageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: ProductImageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/product_image_request_functions_item_trigger.py b/src/gooey/types/product_image_request_functions_item_trigger.py new file mode 100644 index 0000000..126411b --- /dev/null +++ b/src/gooey/types/product_image_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProductImageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/product_image_request_selected_model.py b/src/gooey/types/product_image_request_selected_model.py new file mode 100644 index 0000000..f1ce039 --- /dev/null +++ b/src/gooey/types/product_image_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProductImageRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any] diff --git a/src/gooey/types/qr_code_request_functions_item.py b/src/gooey/types/qr_code_request_functions_item.py new file mode 100644 index 0000000..3ce28d9 --- /dev/null +++ b/src/gooey/types/qr_code_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .qr_code_request_functions_item_trigger import QrCodeRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class QrCodeRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: QrCodeRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/qr_code_request_functions_item_trigger.py b/src/gooey/types/qr_code_request_functions_item_trigger.py new file mode 100644 index 0000000..5ee88c7 --- /dev/null +++ b/src/gooey/types/qr_code_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +QrCodeRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py new file mode 100644 index 0000000..3be2ab6 --- /dev/null +++ b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +QrCodeRequestImagePromptControlnetModelsItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/qr_code_request_qr_code_vcard.py b/src/gooey/types/qr_code_request_qr_code_vcard.py new file mode 100644 index 0000000..28b6dee --- /dev/null +++ b/src/gooey/types/qr_code_request_qr_code_vcard.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class QrCodeRequestQrCodeVcard(UniversalBaseModel): + format_name: str + email: typing.Optional[str] = None + gender: typing.Optional[str] = None + birthday_year: typing.Optional[int] = None + birthday_month: typing.Optional[int] = None + birthday_day: typing.Optional[int] = None + family_name: typing.Optional[str] = None + given_name: typing.Optional[str] = None + middle_names: typing.Optional[str] = None + honorific_prefixes: typing.Optional[str] = None + honorific_suffixes: typing.Optional[str] = None + impp: typing.Optional[str] = None + address: typing.Optional[str] = None + calendar_url: typing.Optional[str] = None + comma_separated_categories: typing.Optional[str] = None + kind: typing.Optional[str] = None + language: typing.Optional[str] = None + organization: typing.Optional[str] = None + photo_url: typing.Optional[str] = None + logo_url: typing.Optional[str] = None + role: typing.Optional[str] = None + timezone: typing.Optional[str] = None + job_title: typing.Optional[str] = None + urls: typing.Optional[typing.List[str]] = None + tel: typing.Optional[str] = None + note: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/qr_code_request_scheduler.py b/src/gooey/types/qr_code_request_scheduler.py new file mode 100644 index 0000000..890b204 --- /dev/null +++ b/src/gooey/types/qr_code_request_scheduler.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +QrCodeRequestScheduler = typing.Union[ + typing.Literal[ + "singlestep_dpm_solver", + "multistep_dpm_solver", + "dpm_sde", + "dpm_discrete", + "dpm_discrete_ancestral", + "unipc", + "lms_discrete", + "heun", + "euler", + "euler_ancestral", + "pndm", + "ddpm", + "ddim", + "deis", + ], + typing.Any, +] diff --git a/src/gooey/types/qr_code_request_selected_controlnet_model_item.py b/src/gooey/types/qr_code_request_selected_controlnet_model_item.py new file mode 100644 index 0000000..c5cdc8d --- /dev/null +++ b/src/gooey/types/qr_code_request_selected_controlnet_model_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +QrCodeRequestSelectedControlnetModelItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/qr_code_request_selected_model.py b/src/gooey/types/qr_code_request_selected_model.py new file mode 100644 index 0000000..7ea963c --- /dev/null +++ b/src/gooey/types/qr_code_request_selected_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +QrCodeRequestSelectedModel = typing.Union[ + typing.Literal[ + "dream_shaper", + "dreamlike_2", + "sd_2", + "sd_1_5", + "dall_e", + "dall_e_3", + "openjourney_2", + "openjourney", + "analog_diffusion", + "protogen_5_3", + "jack_qiao", + "rodent_diffusion_1_5", + "deepfloyd_if", + ], + typing.Any, +] diff --git a/src/gooey/types/related_qn_a_doc_page_request.py b/src/gooey/types/related_qn_a_doc_page_request.py deleted file mode 100644 index d39235f..0000000 --- a/src/gooey/types/related_qn_a_doc_page_request.py +++ /dev/null @@ -1,70 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .related_qn_a_doc_page_request_functions_item import RelatedQnADocPageRequestFunctionsItem -import pydantic -from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery -from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel -from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel -from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle -from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType -from .serp_search_location import SerpSearchLocation -from .serp_search_type import SerpSearchType -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class RelatedQnADocPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RelatedQnADocPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - search_query: str - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = None - documents: typing.Optional[typing.List[str]] = None - max_references: typing.Optional[int] = None - max_context_words: typing.Optional[int] = None - scroll_jump: typing.Optional[int] = None - doc_extract_url: typing.Optional[str] = None - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = None - dense_weight: typing.Optional[float] = pydantic.Field(default=None) - """ - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- """ - - task_instructions: typing.Optional[str] = None - query_instructions: typing.Optional[str] = None - selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = None - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = None - serp_search_location: typing.Optional[SerpSearchLocation] = None - scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_location` instead - """ - - serp_search_type: typing.Optional[SerpSearchType] = None - scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_type` instead - """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/related_qn_a_page_request.py b/src/gooey/types/related_qn_a_page_request.py deleted file mode 100644 index 26836dc..0000000 --- a/src/gooey/types/related_qn_a_page_request.py +++ /dev/null @@ -1,66 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .related_qn_a_page_request_functions_item import RelatedQnAPageRequestFunctionsItem -import pydantic -from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel -from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel -from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType -from .serp_search_location import SerpSearchLocation -from .serp_search_type import SerpSearchType -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class RelatedQnAPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RelatedQnAPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - search_query: str - site_filter: str - task_instructions: typing.Optional[str] = None - query_instructions: typing.Optional[str] = None - selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = None - max_search_urls: typing.Optional[int] = None - max_references: typing.Optional[int] = None - max_context_words: typing.Optional[int] = None - scroll_jump: typing.Optional[int] = None - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = None - dense_weight: typing.Optional[float] = pydantic.Field(default=None) - """ - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- """ - - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = None - serp_search_location: typing.Optional[SerpSearchLocation] = None - scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_location` instead - """ - - serp_search_type: typing.Optional[SerpSearchType] = None - scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_type` instead - """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/remix_image_request_functions_item.py b/src/gooey/types/remix_image_request_functions_item.py new file mode 100644 index 0000000..e8b6f67 --- /dev/null +++ b/src/gooey/types/remix_image_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .remix_image_request_functions_item_trigger import RemixImageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class RemixImageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: RemixImageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/remix_image_request_functions_item_trigger.py b/src/gooey/types/remix_image_request_functions_item_trigger.py new file mode 100644 index 0000000..b131ae6 --- /dev/null +++ b/src/gooey/types/remix_image_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RemixImageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model.py b/src/gooey/types/remix_image_request_selected_controlnet_model.py new file mode 100644 index 0000000..eea207f --- /dev/null +++ b/src/gooey/types/remix_image_request_selected_controlnet_model.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem + +RemixImageRequestSelectedControlnetModel = typing.Union[ + typing.List[RemixImageRequestSelectedControlnetModelItem], + typing.Literal["sd_controlnet_canny"], + typing.Literal["sd_controlnet_depth"], + typing.Literal["sd_controlnet_hed"], + typing.Literal["sd_controlnet_mlsd"], + typing.Literal["sd_controlnet_normal"], + typing.Literal["sd_controlnet_openpose"], + typing.Literal["sd_controlnet_scribble"], + typing.Literal["sd_controlnet_seg"], + typing.Literal["sd_controlnet_tile"], + typing.Literal["sd_controlnet_brightness"], + typing.Literal["control_v1p_sd15_qrcode_monster_v2"], +] diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py new file mode 100644 index 0000000..b4f3ff0 --- /dev/null +++ b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RemixImageRequestSelectedControlnetModelItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/remix_image_request_selected_model.py b/src/gooey/types/remix_image_request_selected_model.py new file mode 100644 index 0000000..245d6b0 --- /dev/null +++ b/src/gooey/types/remix_image_request_selected_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RemixImageRequestSelectedModel = typing.Union[ + typing.Literal[ + "dream_shaper", + "dreamlike_2", + "sd_2", + "sd_1_5", + "dall_e", + "instruct_pix2pix", + "openjourney_2", + "openjourney", + "analog_diffusion", + "protogen_5_3", + "jack_qiao", + "rodent_diffusion_1_5", + ], + typing.Any, +] diff --git a/src/gooey/types/remove_background_request_functions_item.py b/src/gooey/types/remove_background_request_functions_item.py new file mode 100644 index 0000000..09ab9cc --- /dev/null +++ b/src/gooey/types/remove_background_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .remove_background_request_functions_item_trigger import RemoveBackgroundRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class RemoveBackgroundRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: RemoveBackgroundRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/remove_background_request_functions_item_trigger.py b/src/gooey/types/remove_background_request_functions_item_trigger.py new file mode 100644 index 0000000..d240cf2 --- /dev/null +++ b/src/gooey/types/remove_background_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RemoveBackgroundRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/remove_background_request_selected_model.py b/src/gooey/types/remove_background_request_selected_model.py new file mode 100644 index 0000000..c84f0e7 --- /dev/null +++ b/src/gooey/types/remove_background_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RemoveBackgroundRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/seo_summary_page_request.py b/src/gooey/types/seo_summary_page_request.py deleted file mode 100644 index 4515d26..0000000 --- a/src/gooey/types/seo_summary_page_request.py +++ /dev/null @@ -1,52 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel -from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType -from .serp_search_location import SerpSearchLocation -import pydantic -from .serp_search_type import SerpSearchType -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class SeoSummaryPageRequest(UniversalBaseModel): - search_query: str - keywords: str - title: str - company_url: str - task_instructions: typing.Optional[str] = None - enable_html: typing.Optional[bool] = None - selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = None - max_search_urls: typing.Optional[int] = None - enable_crosslinks: typing.Optional[bool] = None - seed: typing.Optional[int] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = None - serp_search_location: typing.Optional[SerpSearchLocation] = None - scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_location` instead - """ - - serp_search_type: typing.Optional[SerpSearchType] = None - scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_type` instead - """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/smart_gpt_page_request.py b/src/gooey/types/smart_gpt_page_request.py deleted file mode 100644 index 
d2353ff..0000000 --- a/src/gooey/types/smart_gpt_page_request.py +++ /dev/null @@ -1,40 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .smart_gpt_page_request_functions_item import SmartGptPageRequestFunctionsItem -import pydantic -from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel -from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class SmartGptPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[SmartGptPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_prompt: str - cot_prompt: typing.Optional[str] = None - reflexion_prompt: typing.Optional[str] = None - dera_prompt: typing.Optional[str] = None - selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/social_lookup_email_page_request.py b/src/gooey/types/social_lookup_email_page_request.py deleted file mode 100644 index a09b47e..0000000 --- a/src/gooey/types/social_lookup_email_page_request.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .social_lookup_email_page_request_functions_item import SocialLookupEmailPageRequestFunctionsItem -import pydantic -from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel -from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class SocialLookupEmailPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[SocialLookupEmailPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - email_address: str - input_prompt: typing.Optional[str] = None - selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/functions_page_request.py b/src/gooey/types/speech_recognition_request_functions_item.py similarity index 56% rename from src/gooey/types/functions_page_request.py rename to src/gooey/types/speech_recognition_request_functions_item.py index f0e15f4..f4f0a12 100644 --- a/src/gooey/types/functions_page_request.py +++ b/src/gooey/types/speech_recognition_request_functions_item.py @@ -1,25 +1,19 @@ # This file was auto-generated by Fern from our API Definition. from ..core.pydantic_utilities import UniversalBaseModel -import typing +from .speech_recognition_request_functions_item_trigger import SpeechRecognitionRequestFunctionsItemTrigger import pydantic -from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing -class FunctionsPageRequest(UniversalBaseModel): - code: typing.Optional[str] = pydantic.Field(default=None) - """ - The JS code to be executed. - """ - - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) +class SpeechRecognitionRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: SpeechRecognitionRequestFunctionsItemTrigger = pydantic.Field() """ - Variables to be used in the code + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. """ - settings: typing.Optional[RunSettings] = None - if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: diff --git a/src/gooey/types/speech_recognition_request_functions_item_trigger.py b/src/gooey/types/speech_recognition_request_functions_item_trigger.py new file mode 100644 index 0000000..a842118 --- /dev/null +++ b/src/gooey/types/speech_recognition_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +SpeechRecognitionRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/speech_recognition_request_output_format.py b/src/gooey/types/speech_recognition_request_output_format.py new file mode 100644 index 0000000..4d2cf2b --- /dev/null +++ b/src/gooey/types/speech_recognition_request_output_format.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SpeechRecognitionRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/types/speech_recognition_request_selected_model.py b/src/gooey/types/speech_recognition_request_selected_model.py new file mode 100644 index 0000000..9d2d28f --- /dev/null +++ b/src/gooey/types/speech_recognition_request_selected_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SpeechRecognitionRequestSelectedModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/speech_recognition_request_translation_model.py b/src/gooey/types/speech_recognition_request_translation_model.py new file mode 100644 index 0000000..886ab92 --- /dev/null +++ b/src/gooey/types/speech_recognition_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SpeechRecognitionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/synthesize_data_request_functions_item.py b/src/gooey/types/synthesize_data_request_functions_item.py new file mode 100644 index 0000000..264f3cc --- /dev/null +++ b/src/gooey/types/synthesize_data_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .synthesize_data_request_functions_item_trigger import SynthesizeDataRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class SynthesizeDataRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: SynthesizeDataRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/synthesize_data_request_functions_item_trigger.py b/src/gooey/types/synthesize_data_request_functions_item_trigger.py new file mode 100644 index 0000000..53d88fa --- /dev/null +++ b/src/gooey/types/synthesize_data_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
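# Illustrative sketch (not part of the patch): each "selected model" alias above is declared as
# Union[Literal[...], Any], so the documented choices can be listed at runtime while unknown
# strings still pass type checking. The module path matches the new file introduced above.
import typing

from gooey.types.speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel

literal_member, _any_member = typing.get_args(SpeechRecognitionRequestSelectedModel)
known_models = typing.get_args(literal_member)
print(known_models)  # ("whisper_large_v2", "whisper_large_v3", ..., "seamless_m4t")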
+ +import typing + +SynthesizeDataRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/synthesize_data_request_response_format_type.py b/src/gooey/types/synthesize_data_request_response_format_type.py new file mode 100644 index 0000000..3ab37a9 --- /dev/null +++ b/src/gooey/types/synthesize_data_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SynthesizeDataRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/synthesize_data_request_selected_asr_model.py b/src/gooey/types/synthesize_data_request_selected_asr_model.py new file mode 100644 index 0000000..6c1bc21 --- /dev/null +++ b/src/gooey/types/synthesize_data_request_selected_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SynthesizeDataRequestSelectedAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/synthesize_data_request_selected_model.py b/src/gooey/types/synthesize_data_request_selected_model.py new file mode 100644 index 0000000..42bde95 --- /dev/null +++ b/src/gooey/types/synthesize_data_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SynthesizeDataRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/text2audio_page_request.py b/src/gooey/types/text2audio_page_request.py deleted file mode 100644 index 5594488..0000000 --- a/src/gooey/types/text2audio_page_request.py +++ /dev/null @@ -1,36 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
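# Illustrative sketch (not part of the patch): because SynthesizeDataRequestSelectedModel also
# admits typing.Any, arbitrary strings are not rejected by the alias itself. A caller can still
# warn about undocumented names before sending a request; the API remains the final validator.
import typing

from gooey.types.synthesize_data_request_selected_model import SynthesizeDataRequestSelectedModel


def check_model_name(name: str) -> str:
    literal_member = typing.get_args(SynthesizeDataRequestSelectedModel)[0]
    if name not in typing.get_args(literal_member):
        # Unknown values are passed through; the server decides whether to accept them.
        print(f"warning: {name!r} is not one of the documented model names")
    return name


check_model_name("gpt_4_o")        # documented choice
check_model_name("my_custom_llm")  # passes through with a warning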
- -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .text2audio_page_request_functions_item import Text2AudioPageRequestFunctionsItem -import pydantic -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class Text2AudioPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[Text2AudioPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - text_prompt: str - negative_prompt: typing.Optional[str] = None - duration_sec: typing.Optional[float] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - guidance_scale: typing.Optional[float] = None - seed: typing.Optional[int] = None - sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) - selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/text_to_speech_page_request.py b/src/gooey/types/text_to_speech_page_request.py deleted file mode 100644 index fa527a5..0000000 --- a/src/gooey/types/text_to_speech_page_request.py +++ /dev/null @@ -1,53 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.pydantic_utilities import UniversalBaseModel -import typing -from .text_to_speech_page_request_functions_item import TextToSpeechPageRequestFunctionsItem -import pydantic -from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider -from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName -from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel -from .run_settings import RunSettings -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class TextToSpeechPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[TextToSpeechPageRequestFunctionsItem]] = None - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - text_prompt: str - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = None - uberduck_voice_name: typing.Optional[str] = None - uberduck_speaking_rate: typing.Optional[float] = None - google_voice_name: typing.Optional[str] = None - google_speaking_rate: typing.Optional[float] = None - google_pitch: typing.Optional[float] = None - bark_history_prompt: typing.Optional[str] = None - elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None) - """ - Use `elevenlabs_voice_id` instead - """ - - elevenlabs_api_key: typing.Optional[str] = None - elevenlabs_voice_id: typing.Optional[str] = None - elevenlabs_model: typing.Optional[str] = None - elevenlabs_stability: typing.Optional[float] = None - elevenlabs_similarity_boost: typing.Optional[float] = None - elevenlabs_style: typing.Optional[float] = None - elevenlabs_speaker_boost: typing.Optional[bool] = None - azure_voice_name: typing.Optional[str] = None - openai_voice_name: 
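# Illustrative sketch (not part of the patch): the deleted Text2AudioPageRequest exposed the wire
# field "sd_2_upscaling" under the Python name "sd2upscaling" via a Field alias. A minimal
# standalone model showing the same aliasing behaviour (assumes Pydantic v2):
import typing
import pydantic


class AudioSettings(pydantic.BaseModel):
    model_config = pydantic.ConfigDict(populate_by_name=True)

    sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)


settings = AudioSettings(sd_2_upscaling=True)  # accepted via the alias
print(settings.model_dump(by_alias=True))      # {'sd_2_upscaling': True}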
typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = None - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/translate_request_functions_item.py b/src/gooey/types/translate_request_functions_item.py new file mode 100644 index 0000000..7b8674c --- /dev/null +++ b/src/gooey/types/translate_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .translate_request_functions_item_trigger import TranslateRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class TranslateRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: TranslateRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/translate_request_functions_item_trigger.py b/src/gooey/types/translate_request_functions_item_trigger.py new file mode 100644 index 0000000..8066e6f --- /dev/null +++ b/src/gooey/types/translate_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TranslateRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/translate_request_selected_model.py b/src/gooey/types/translate_request_selected_model.py new file mode 100644 index 0000000..b774b56 --- /dev/null +++ b/src/gooey/types/translate_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TranslateRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/upscale_request_functions_item.py b/src/gooey/types/upscale_request_functions_item.py new file mode 100644 index 0000000..c2c2c3b --- /dev/null +++ b/src/gooey/types/upscale_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .upscale_request_functions_item_trigger import UpscaleRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class UpscaleRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: UpscaleRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
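# Illustrative sketch (not part of the patch): translate requests pair a selected model
# ("google" or "ghana_nlp") with optional function hooks built from the item model above.
# Passing these as a `functions` argument on the corresponding client method is an assumption;
# the exact method signature is not shown in this patch.
from gooey.types.translate_request_functions_item import TranslateRequestFunctionsItem
from gooey.types.translate_request_selected_model import TranslateRequestSelectedModel

model: TranslateRequestSelectedModel = "ghana_nlp"
hooks = [
    TranslateRequestFunctionsItem(url="https://example.com/functions/clean-text", trigger="pre"),   # before the recipe
    TranslateRequestFunctionsItem(url="https://example.com/functions/log-result", trigger="post"),  # after the recipe
]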
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/upscale_request_functions_item_trigger.py b/src/gooey/types/upscale_request_functions_item_trigger.py new file mode 100644 index 0000000..f5813c0 --- /dev/null +++ b/src/gooey/types/upscale_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +UpscaleRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/upscale_request_selected_models_item.py b/src/gooey/types/upscale_request_selected_models_item.py new file mode 100644 index 0000000..1a8362e --- /dev/null +++ b/src/gooey/types/upscale_request_selected_models_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +UpscaleRequestSelectedModelsItem = typing.Union[ + typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any +]