From 4db1425af1e23481d7a17bd89c55b7373a80527e Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 5 Sep 2024 15:31:30 +0000
Subject: [PATCH] Release 0.0.1-beta5
---
README.md | 22 +-
pyproject.toml | 2 +-
reference.md | 5249 +++-------
src/gooey/__init__.py | 168 +-
src/gooey/client.py | 8565 +++++++----------
.../copilot_for_your_enterprise/__init__.py | 25 -
.../copilot_for_your_enterprise/client.py | 603 +-
.../types/__init__.py | 25 -
src/gooey/core/client_wrapper.py | 2 +-
src/gooey/errors/__init__.py | 10 +-
src/gooey/errors/bad_request_error.py | 9 +
src/gooey/errors/internal_server_error.py | 9 +
src/gooey/evaluator/__init__.py | 3 -
src/gooey/evaluator/client.py | 210 +-
src/gooey/evaluator/types/__init__.py | 6 -
src/gooey/functions/client.py | 91 +-
src/gooey/lip_syncing/__init__.py | 3 -
src/gooey/lip_syncing/client.py | 164 +-
src/gooey/lip_syncing/types/__init__.py | 5 -
src/gooey/smart_gpt/__init__.py | 3 -
src/gooey/smart_gpt/client.py | 192 +-
src/gooey/smart_gpt/types/__init__.py | 6 -
src/gooey/types/__init__.py | 158 +
src/gooey/types/asr_page_request.py | 49 +
.../types/body_async_form_art_qr_code.py | 20 +
src/gooey/types/body_async_form_asr.py | 20 +
src/gooey/types/body_async_form_bulk_eval.py | 20 +
.../types/body_async_form_bulk_runner.py | 20 +
.../types/body_async_form_chyron_plant.py | 20 +
.../body_async_form_compare_ai_upscalers.py | 20 +
.../types/body_async_form_compare_llm.py | 20 +
.../types/body_async_form_compare_text2img.py | 20 +
src/gooey/types/body_async_form_deforum_sd.py | 20 +
.../types/body_async_form_doc_extract.py | 20 +
src/gooey/types/body_async_form_doc_search.py | 20 +
.../types/body_async_form_doc_summary.py | 20 +
.../body_async_form_email_face_inpainting.py | 20 +
src/gooey/types/body_async_form_embeddings.py | 20 +
.../types/body_async_form_face_inpainting.py | 20 +
src/gooey/types/body_async_form_functions.py | 20 +
src/gooey/types/body_async_form_google_gpt.py | 20 +
.../types/body_async_form_google_image_gen.py | 20 +
.../body_async_form_image_segmentation.py | 20 +
src/gooey/types/body_async_form_img2img.py | 20 +
.../types/body_async_form_letter_writer.py | 20 +
src/gooey/types/body_async_form_lipsync.py | 20 +
.../types/body_async_form_lipsync_tts.py | 20 +
.../body_async_form_object_inpainting.py | 20 +
.../body_async_form_related_qna_maker.py | 20 +
.../body_async_form_related_qna_maker_doc.py | 20 +
.../types/body_async_form_seo_summary.py | 20 +
src/gooey/types/body_async_form_smart_gpt.py | 20 +
.../body_async_form_social_lookup_email.py | 20 +
src/gooey/types/body_async_form_text2audio.py | 20 +
.../types/body_async_form_text_to_speech.py | 20 +
src/gooey/types/body_async_form_translate.py | 20 +
src/gooey/types/body_async_form_video_bots.py | 20 +
src/gooey/types/bulk_eval_page_request.py | 57 +
..._eval_page_request_response_format_type.py | 0
.../bulk_eval_page_request_selected_model.py | 0
src/gooey/types/bulk_runner_page_request.py | 56 +
src/gooey/types/compare_llm_page_request.py | 38 +
.../types/compare_text2img_page_request.py | 45 +
.../types/compare_upscaler_page_request.py | 46 +
src/gooey/types/deforum_sd_page_request.py | 42 +
src/gooey/types/doc_extract_page_request.py | 49 +
src/gooey/types/doc_search_page_request.py | 57 +
src/gooey/types/doc_summary_page_request.py | 44 +
.../email_face_inpainting_page_request.py | 52 +
src/gooey/types/embeddings_page_request.py | 31 +
.../types/face_inpainting_page_request.py | 43 +
src/gooey/types/functions_page_request.py | 31 +
src/gooey/types/google_gpt_page_request.py | 67 +
.../types/google_image_gen_page_request.py | 47 +
.../types/image_segmentation_page_request.py | 37 +
src/gooey/types/img2img_page_request.py | 44 +
src/gooey/types/lipsync_page_request.py | 38 +
.../lipsync_page_request_selected_model.py | 0
src/gooey/types/lipsync_tts_page_request.py | 63 +
.../types/object_inpainting_page_request.py | 44 +
.../types/qr_code_generator_page_request.py | 67 +
.../types/related_qn_a_doc_page_request.py | 71 +
src/gooey/types/related_qn_a_page_request.py | 67 +
src/gooey/types/seo_summary_page_request.py | 53 +
src/gooey/types/smart_gpt_page_request.py | 41 +
...t_gpt_page_request_response_format_type.py | 0
.../smart_gpt_page_request_selected_model.py | 0
.../types/social_lookup_email_page_request.py | 39 +
src/gooey/types/text2audio_page_request.py | 37 +
.../types/text_to_speech_page_request.py | 54 +
src/gooey/types/translation_page_request.py | 39 +
src/gooey/types/video_bots_page_request.py | 140 +
.../video_bots_page_request_asr_model.py | 0
.../video_bots_page_request_citation_style.py | 0
...video_bots_page_request_embedding_model.py | 0
.../video_bots_page_request_lipsync_model.py | 0
...ideo_bots_page_request_openai_tts_model.py | 0
...deo_bots_page_request_openai_voice_name.py | 0
..._bots_page_request_response_format_type.py | 0
.../video_bots_page_request_selected_model.py | 0
...deo_bots_page_request_translation_model.py | 0
.../video_bots_page_request_tts_provider.py | 0
102 files changed, 7523 insertions(+), 10255 deletions(-)
delete mode 100644 src/gooey/copilot_for_your_enterprise/types/__init__.py
create mode 100644 src/gooey/errors/bad_request_error.py
create mode 100644 src/gooey/errors/internal_server_error.py
delete mode 100644 src/gooey/evaluator/types/__init__.py
delete mode 100644 src/gooey/lip_syncing/types/__init__.py
delete mode 100644 src/gooey/smart_gpt/types/__init__.py
create mode 100644 src/gooey/types/asr_page_request.py
create mode 100644 src/gooey/types/body_async_form_art_qr_code.py
create mode 100644 src/gooey/types/body_async_form_asr.py
create mode 100644 src/gooey/types/body_async_form_bulk_eval.py
create mode 100644 src/gooey/types/body_async_form_bulk_runner.py
create mode 100644 src/gooey/types/body_async_form_chyron_plant.py
create mode 100644 src/gooey/types/body_async_form_compare_ai_upscalers.py
create mode 100644 src/gooey/types/body_async_form_compare_llm.py
create mode 100644 src/gooey/types/body_async_form_compare_text2img.py
create mode 100644 src/gooey/types/body_async_form_deforum_sd.py
create mode 100644 src/gooey/types/body_async_form_doc_extract.py
create mode 100644 src/gooey/types/body_async_form_doc_search.py
create mode 100644 src/gooey/types/body_async_form_doc_summary.py
create mode 100644 src/gooey/types/body_async_form_email_face_inpainting.py
create mode 100644 src/gooey/types/body_async_form_embeddings.py
create mode 100644 src/gooey/types/body_async_form_face_inpainting.py
create mode 100644 src/gooey/types/body_async_form_functions.py
create mode 100644 src/gooey/types/body_async_form_google_gpt.py
create mode 100644 src/gooey/types/body_async_form_google_image_gen.py
create mode 100644 src/gooey/types/body_async_form_image_segmentation.py
create mode 100644 src/gooey/types/body_async_form_img2img.py
create mode 100644 src/gooey/types/body_async_form_letter_writer.py
create mode 100644 src/gooey/types/body_async_form_lipsync.py
create mode 100644 src/gooey/types/body_async_form_lipsync_tts.py
create mode 100644 src/gooey/types/body_async_form_object_inpainting.py
create mode 100644 src/gooey/types/body_async_form_related_qna_maker.py
create mode 100644 src/gooey/types/body_async_form_related_qna_maker_doc.py
create mode 100644 src/gooey/types/body_async_form_seo_summary.py
create mode 100644 src/gooey/types/body_async_form_smart_gpt.py
create mode 100644 src/gooey/types/body_async_form_social_lookup_email.py
create mode 100644 src/gooey/types/body_async_form_text2audio.py
create mode 100644 src/gooey/types/body_async_form_text_to_speech.py
create mode 100644 src/gooey/types/body_async_form_translate.py
create mode 100644 src/gooey/types/body_async_form_video_bots.py
create mode 100644 src/gooey/types/bulk_eval_page_request.py
rename src/gooey/{evaluator => }/types/bulk_eval_page_request_response_format_type.py (100%)
rename src/gooey/{evaluator => }/types/bulk_eval_page_request_selected_model.py (100%)
create mode 100644 src/gooey/types/bulk_runner_page_request.py
create mode 100644 src/gooey/types/compare_llm_page_request.py
create mode 100644 src/gooey/types/compare_text2img_page_request.py
create mode 100644 src/gooey/types/compare_upscaler_page_request.py
create mode 100644 src/gooey/types/deforum_sd_page_request.py
create mode 100644 src/gooey/types/doc_extract_page_request.py
create mode 100644 src/gooey/types/doc_search_page_request.py
create mode 100644 src/gooey/types/doc_summary_page_request.py
create mode 100644 src/gooey/types/email_face_inpainting_page_request.py
create mode 100644 src/gooey/types/embeddings_page_request.py
create mode 100644 src/gooey/types/face_inpainting_page_request.py
create mode 100644 src/gooey/types/functions_page_request.py
create mode 100644 src/gooey/types/google_gpt_page_request.py
create mode 100644 src/gooey/types/google_image_gen_page_request.py
create mode 100644 src/gooey/types/image_segmentation_page_request.py
create mode 100644 src/gooey/types/img2img_page_request.py
create mode 100644 src/gooey/types/lipsync_page_request.py
rename src/gooey/{lip_syncing => }/types/lipsync_page_request_selected_model.py (100%)
create mode 100644 src/gooey/types/lipsync_tts_page_request.py
create mode 100644 src/gooey/types/object_inpainting_page_request.py
create mode 100644 src/gooey/types/qr_code_generator_page_request.py
create mode 100644 src/gooey/types/related_qn_a_doc_page_request.py
create mode 100644 src/gooey/types/related_qn_a_page_request.py
create mode 100644 src/gooey/types/seo_summary_page_request.py
create mode 100644 src/gooey/types/smart_gpt_page_request.py
rename src/gooey/{smart_gpt => }/types/smart_gpt_page_request_response_format_type.py (100%)
rename src/gooey/{smart_gpt => }/types/smart_gpt_page_request_selected_model.py (100%)
create mode 100644 src/gooey/types/social_lookup_email_page_request.py
create mode 100644 src/gooey/types/text2audio_page_request.py
create mode 100644 src/gooey/types/text_to_speech_page_request.py
create mode 100644 src/gooey/types/translation_page_request.py
create mode 100644 src/gooey/types/video_bots_page_request.py
rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_asr_model.py (100%)
rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_citation_style.py (100%)
rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_embedding_model.py (100%)
rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_lipsync_model.py (100%)
rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_openai_tts_model.py (100%)
rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_openai_voice_name.py (100%)
rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_response_format_type.py (100%)
rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_selected_model.py (100%)
rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_translation_model.py (100%)
rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_tts_provider.py (100%)
diff --git a/README.md b/README.md
index 278b73e..020f347 100644
--- a/README.md
+++ b/README.md
@@ -16,19 +16,12 @@ pip install gooeyai
Instantiate and use the client with the following:
```python
-from gooey import AnimationPrompt, Gooey
+from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.animate(
- animation_prompts=[
- AnimationPrompt(
- frame="frame",
- prompt="prompt",
- )
- ],
-)
+client.animate()
```
## Async Client
@@ -38,7 +31,7 @@ The SDK also exports an `async` client so that you can make non-blocking calls t
```python
import asyncio
-from gooey import AnimationPrompt, AsyncGooey
+from gooey import AsyncGooey
client = AsyncGooey(
api_key="YOUR_API_KEY",
@@ -46,14 +39,7 @@ client = AsyncGooey(
async def main() -> None:
- await client.animate(
- animation_prompts=[
- AnimationPrompt(
- frame="frame",
- prompt="prompt",
- )
- ],
- )
+ await client.animate()
asyncio.run(main())
diff --git a/pyproject.toml b/pyproject.toml
index 745914e..6c886fb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "gooeyai"
-version = "0.0.1-beta3"
+version = "0.0.1-beta5"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index b24b52e..9cf98dd 100644
--- a/reference.md
+++ b/reference.md
@@ -12,19 +12,12 @@
```python
-from gooey import AnimationPrompt, Gooey
+from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.animate(
- animation_prompts=[
- AnimationPrompt(
- frame="frame",
- prompt="prompt",
- )
- ],
-)
+client.animate()
```
@@ -40,7 +33,7 @@ client.animate(
-
-**animation_prompts:** `typing.Sequence[AnimationPrompt]`
+**example_id:** `typing.Optional[str]`
@@ -48,63 +41,53 @@ client.animate(
-
-**example_id:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
--
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
+
+client.qr_code(...)
-
-**max_frames:** `typing.Optional[int]`
-
-
-
+#### 🔌 Usage
-
-**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]`
-
-
-
-
-
-**animation_mode:** `typing.Optional[str]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.qr_code()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**zoom:** `typing.Optional[str]`
-
-
-
-
-
-**translation_x:** `typing.Optional[str]`
+**example_id:** `typing.Optional[str]`
@@ -112,55 +95,53 @@ client.animate(
-
-**translation_y:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**rotation3d_x:** `typing.Optional[str]`
-
+
+client.seo_people_also_ask(...)
-
-**rotation3d_y:** `typing.Optional[str]`
-
-
-
+#### 🔌 Usage
-
-**rotation3d_z:** `typing.Optional[str]`
-
-
-
-
-
-**fps:** `typing.Optional[int]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.seo_people_also_ask()
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**seed:** `typing.Optional[int]`
-
-
-
-
-
-**settings:** `typing.Optional[RunSettings]`
+**example_id:** `typing.Optional[str]`
@@ -180,7 +161,7 @@ client.animate(
-client.qr_code(...)
+client.seo_content(...)
-
@@ -198,9 +179,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.qr_code(
- text_prompt="text_prompt",
-)
+client.seo_content()
```
@@ -216,7 +195,7 @@ client.qr_code(
-
-**text_prompt:** `str`
+**example_id:** `typing.Optional[str]`
@@ -224,63 +203,53 @@ client.qr_code(
-
-**example_id:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
--
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
+
+client.web_search_llm(...)
-
-**qr_code_data:** `typing.Optional[str]`
-
-
-
+#### 🔌 Usage
-
-**qr_code_input_image:** `typing.Optional[str]`
-
-
-
-
-
-**qr_code_vcard:** `typing.Optional[Vcard]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.web_search_llm()
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**qr_code_file:** `typing.Optional[str]`
-
-
-
-
-
-**use_url_shortener:** `typing.Optional[bool]`
+**example_id:** `typing.Optional[str]`
@@ -288,57 +257,53 @@ client.qr_code(
-
-**negative_prompt:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**image_prompt:** `typing.Optional[str]`
-
+
+client.personalize_email(...)
-
-**image_prompt_controlnet_models:** `typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
-]`
-
-
-
+#### 🔌 Usage
-
-**image_prompt_strength:** `typing.Optional[float]`
-
-
-
-
-
-**image_prompt_scale:** `typing.Optional[float]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.personalize_email()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**image_prompt_pos_x:** `typing.Optional[float]`
-
-
-
-
-
-**image_prompt_pos_y:** `typing.Optional[float]`
+**example_id:** `typing.Optional[str]`
@@ -346,57 +311,53 @@ client.qr_code(
-
-**selected_model:** `typing.Optional[QrCodeGeneratorPageRequestSelectedModel]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**selected_controlnet_model:** `typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
-]`
-
+
+client.bulk_run(...)
-
-**output_width:** `typing.Optional[int]`
-
-
-
+#### 🔌 Usage
-
-**output_height:** `typing.Optional[int]`
-
-
-
-
-
-**guidance_scale:** `typing.Optional[float]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.bulk_run()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
-
-
-
-
-
-**num_outputs:** `typing.Optional[int]`
+**example_id:** `typing.Optional[str]`
@@ -404,55 +365,53 @@ client.qr_code(
-
-**quality:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**scheduler:** `typing.Optional[QrCodeGeneratorPageRequestScheduler]`
-
+
+client.synthesize_data(...)
-
-**seed:** `typing.Optional[int]`
-
-
-
+#### 🔌 Usage
-
-**obj_scale:** `typing.Optional[float]`
-
-
-
-
-
-**obj_pos_x:** `typing.Optional[float]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.synthesize_data()
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**obj_pos_y:** `typing.Optional[float]`
-
-
-
-
-
-**settings:** `typing.Optional[RunSettings]`
+**example_id:** `typing.Optional[str]`
@@ -472,7 +431,7 @@ client.qr_code(
-client.seo_people_also_ask(...)
+client.llm(...)
-
@@ -490,10 +449,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.seo_people_also_ask(
- search_query="search_query",
- site_filter="site_filter",
-)
+client.llm()
```
@@ -509,7 +465,7 @@ client.seo_people_also_ask(
-
-**search_query:** `str`
+**example_id:** `typing.Optional[str]`
@@ -517,71 +473,53 @@ client.seo_people_also_ask(
-
-**site_filter:** `str`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
--
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
+
+client.rag(...)
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
+#### 🔌 Usage
-
-**task_instructions:** `typing.Optional[str]`
-
-
-
-
-
-**query_instructions:** `typing.Optional[str]`
-
-
-
+```python
+from gooey import Gooey
-
--
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.rag()
-**selected_model:** `typing.Optional[RelatedQnAPageRequestSelectedModel]`
-
+```
+
+
+#### ⚙️ Parameters
+
-
-**max_search_urls:** `typing.Optional[int]`
-
-
-
-
-
-**max_references:** `typing.Optional[int]`
+**example_id:** `typing.Optional[str]`
@@ -589,60 +527,53 @@ client.seo_people_also_ask(
-
-**max_context_words:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**scroll_jump:** `typing.Optional[int]`
-
+
+client.doc_summary(...)
-
-**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]`
-
-
-
+#### 🔌 Usage
-
-**dense_weight:** `typing.Optional[float]`
+
+-
+```python
+from gooey import Gooey
-Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
-Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.doc_summary()
+
+```
-
-
--
-
-**avoid_repetition:** `typing.Optional[bool]`
-
+#### ⚙️ Parameters
+
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
-**quality:** `typing.Optional[float]`
+**example_id:** `typing.Optional[str]`
@@ -650,63 +581,53 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**max_tokens:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**sampling_temperature:** `typing.Optional[float]`
-
-
--
-**response_format_type:** `typing.Optional[RelatedQnAPageRequestResponseFormatType]`
-
+
+client.lipsync_tts(...)
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
-
-
-
+#### 🔌 Usage
-
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
-
-
-
-
-
-**serp_search_type:** `typing.Optional[SerpSearchType]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.lipsync_tts()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
-
-
-
-
-
-**settings:** `typing.Optional[RunSettings]`
+**example_id:** `typing.Optional[str]`
@@ -726,7 +647,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-client.seo_content(...)
+client.text_to_speech(...)
-
@@ -744,12 +665,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.seo_content(
- search_query="search_query",
- keywords="keywords",
- title="title",
- company_url="company_url",
-)
+client.text_to_speech()
```
@@ -765,7 +681,7 @@ client.seo_content(
-
-**search_query:** `str`
+**example_id:** `typing.Optional[str]`
@@ -773,55 +689,53 @@ client.seo_content(
-
-**keywords:** `str`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**title:** `str`
-
+
+client.speech_recognition(...)
-
-**company_url:** `str`
-
-
-
+#### 🔌 Usage
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
-**task_instructions:** `typing.Optional[str]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.speech_recognition()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**enable_html:** `typing.Optional[bool]`
-
-
-
-
-
-**selected_model:** `typing.Optional[SeoSummaryPageRequestSelectedModel]`
+**example_id:** `typing.Optional[str]`
@@ -829,55 +743,53 @@ client.seo_content(
-
-**max_search_urls:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**enable_crosslinks:** `typing.Optional[bool]`
-
+
+client.text_to_music(...)
-
-**seed:** `typing.Optional[int]`
-
-
-
+#### 🔌 Usage
-
-**avoid_repetition:** `typing.Optional[bool]`
-
-
-
-
-
-**num_outputs:** `typing.Optional[int]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.text_to_music()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**quality:** `typing.Optional[float]`
-
-
-
-
-
-**max_tokens:** `typing.Optional[int]`
+**example_id:** `typing.Optional[str]`
@@ -885,55 +797,53 @@ client.seo_content(
-
-**sampling_temperature:** `typing.Optional[float]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**response_format_type:** `typing.Optional[SeoSummaryPageRequestResponseFormatType]`
-
+
+client.translate(...)
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
-
-
-
+#### 🔌 Usage
-
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
-
-
-
-
-
-**serp_search_type:** `typing.Optional[SerpSearchType]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.translate()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
-
-
-
-
-
-**settings:** `typing.Optional[RunSettings]`
+**example_id:** `typing.Optional[str]`
@@ -953,7 +863,7 @@ client.seo_content(
-client.web_search_llm(...)
+client.remix_image(...)
-
@@ -971,10 +881,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.web_search_llm(
- search_query="search_query",
- site_filter="site_filter",
-)
+client.remix_image()
```
@@ -990,7 +897,7 @@ client.web_search_llm(
-
-**search_query:** `str`
+**example_id:** `typing.Optional[str]`
@@ -998,39 +905,53 @@ client.web_search_llm(
-
-**site_filter:** `str`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**example_id:** `typing.Optional[str]`
-
+
+client.text_to_image(...)
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
+#### 🔌 Usage
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.text_to_image()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**task_instructions:** `typing.Optional[str]`
+
+-
+
+**example_id:** `typing.Optional[str]`
@@ -1038,84 +959,53 @@ client.web_search_llm(
-
-**query_instructions:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**selected_model:** `typing.Optional[GoogleGptPageRequestSelectedModel]`
-
-
--
-**max_search_urls:** `typing.Optional[int]`
-
+
+client.product_image(...)
-
-**max_references:** `typing.Optional[int]`
-
-
-
+#### 🔌 Usage
-
-**max_context_words:** `typing.Optional[int]`
-
-
-
-
-
-**scroll_jump:** `typing.Optional[int]`
-
-
-
+```python
+from gooey import Gooey
-
--
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.product_image()
-**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]`
-
+```
-
-
--
-
-**dense_weight:** `typing.Optional[float]`
-
-
-Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
-Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
+#### ⚙️ Parameters
+
-
-**avoid_repetition:** `typing.Optional[bool]`
-
-
-
-
-
-**num_outputs:** `typing.Optional[int]`
+**example_id:** `typing.Optional[str]`
@@ -1123,71 +1013,53 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**quality:** `typing.Optional[float]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**max_tokens:** `typing.Optional[int]`
-
-
--
-**sampling_temperature:** `typing.Optional[float]`
-
+
+client.portrait(...)
-
-**response_format_type:** `typing.Optional[GoogleGptPageRequestResponseFormatType]`
-
-
-
+#### 🔌 Usage
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
-
-
-
-
-
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
-
-
-
+```python
+from gooey import Gooey
-
--
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.portrait()
-**serp_search_type:** `typing.Optional[SerpSearchType]`
-
+```
+
+
+#### ⚙️ Parameters
+
-
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
-
-
-
-
-
-**settings:** `typing.Optional[RunSettings]`
+**example_id:** `typing.Optional[str]`
@@ -1207,7 +1079,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-client.personalize_email(...)
+client.image_from_email(...)
-
@@ -1225,9 +1097,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.personalize_email(
- email_address="email_address",
-)
+client.image_from_email()
```
@@ -1243,7 +1113,7 @@ client.personalize_email(
-
-**email_address:** `str`
+**example_id:** `typing.Optional[str]`
@@ -1251,55 +1121,53 @@ client.personalize_email(
-
-**example_id:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
+
+client.image_from_web_search(...)
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
+#### 🔌 Usage
-
-**input_prompt:** `typing.Optional[str]`
-
-
-
-
-
-**selected_model:** `typing.Optional[SocialLookupEmailPageRequestSelectedModel]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.image_from_web_search()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**avoid_repetition:** `typing.Optional[bool]`
-
-
-
-
-
-**num_outputs:** `typing.Optional[int]`
+**example_id:** `typing.Optional[str]`
@@ -1307,39 +1175,53 @@ client.personalize_email(
-
-**quality:** `typing.Optional[float]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**max_tokens:** `typing.Optional[int]`
-
+
+client.remove_background(...)
-
-**sampling_temperature:** `typing.Optional[float]`
-
-
-
+#### 🔌 Usage
-
-**response_format_type:** `typing.Optional[SocialLookupEmailPageRequestResponseFormatType]`
-
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.remove_background()
+
+```
+
+
+#### ⚙️ Parameters
+
+
+-
+
-
-**settings:** `typing.Optional[RunSettings]`
+**example_id:** `typing.Optional[str]`
@@ -1359,7 +1241,7 @@ client.personalize_email(
-client.bulk_run(...)
+client.upscale(...)
-
@@ -1377,12 +1259,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.bulk_run(
- documents=["documents"],
- run_urls=["run_urls"],
- input_columns={"key": "value"},
- output_columns={"key": "value"},
-)
+client.upscale()
```
@@ -1398,13 +1275,7 @@ client.bulk_run(
-
-**documents:** `typing.Sequence[str]`
-
-
-Upload or link to a CSV or google sheet that contains your sample input data.
-For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
-Remember to includes header names in your CSV too.
-
+**example_id:** `typing.Optional[str]`
@@ -1412,80 +1283,53 @@ Remember to includes header names in your CSV too.
-
-**run_urls:** `typing.Sequence[str]`
-
-
-Provide one or more Gooey.AI workflow runs.
-You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
-
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**input_columns:** `typing.Dict[str, str]`
-
-
-For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
-
-
-
--
-
-**output_columns:** `typing.Dict[str, str]`
-
-For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
-
-
+
+client.embed(...)
-
-**example_id:** `typing.Optional[str]`
-
-
-
+#### 🔌 Usage
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.embed()
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**eval_urls:** `typing.Optional[typing.Sequence[str]]`
-
-
-_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
-
-
-
-
-
-
-**settings:** `typing.Optional[RunSettings]`
+**example_id:** `typing.Optional[str]`
@@ -1505,7 +1349,7 @@ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the result
-client.synthesize_data(...)
+client.seo_people_also_ask_doc(...)
-
@@ -1523,9 +1367,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.synthesize_data(
- documents=["documents"],
-)
+client.seo_people_also_ask_doc()
```
@@ -1541,7 +1383,7 @@ client.synthesize_data(
-
-**documents:** `typing.Sequence[str]`
+**example_id:** `typing.Optional[str]`
@@ -1549,134 +1391,141 @@ client.synthesize_data(
-
-**example_id:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
+
+client.health_status_get()
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
+#### 🔌 Usage
-
-**sheet_url:** `typing.Optional[str]`
-
-
-
-
-
-**selected_asr_model:** `typing.Optional[DocExtractPageRequestSelectedAsrModel]`
-
-
-
+```python
+from gooey import Gooey
-
--
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.health_status_get()
-**google_translate_target:** `typing.Optional[str]`
-
+```
+
+
+
+#### ⚙️ Parameters
-
-**glossary_document:** `typing.Optional[str]`
+
+-
-Provide a glossary to customize translation and improve accuracy of domain-specific terms.
-If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**task_instructions:** `typing.Optional[str]`
-
+
+client.post_v3chyron_plant_async()
-
-**selected_model:** `typing.Optional[DocExtractPageRequestSelectedModel]`
-
-
-
+#### 🔌 Usage
-
-**avoid_repetition:** `typing.Optional[bool]`
-
-
-
-
-
-**num_outputs:** `typing.Optional[int]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3chyron_plant_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**quality:** `typing.Optional[float]`
-
-
-
-
-
-**max_tokens:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**sampling_temperature:** `typing.Optional[float]`
-
+
+client.post_v3compare_llm_async()
-
-**response_format_type:** `typing.Optional[DocExtractPageRequestResponseFormatType]`
-
-
-
+#### 🔌 Usage
-
-**settings:** `typing.Optional[RunSettings]`
-
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3compare_llm_async()
+
+```
+
+
+#### ⚙️ Parameters
+
+
+-
+
-
@@ -1692,7 +1541,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-client.llm(...)
+client.post_v3compare_text2img_async()
-
@@ -1710,7 +1559,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.llm()
+client.post_v3compare_text2img_async()
```
@@ -1726,99 +1575,95 @@ client.llm()
-
-**example_id:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
+
+client.post_v3deforum_sd_async()
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
+#### 🔌 Usage
-
-**input_prompt:** `typing.Optional[str]`
-
-
-
-
-
-**selected_models:** `typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3deforum_sd_async()
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**avoid_repetition:** `typing.Optional[bool]`
-
-
-
-
-
-**num_outputs:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**quality:** `typing.Optional[float]`
-
+
+client.post_v3email_face_inpainting_async()
-
-**max_tokens:** `typing.Optional[int]`
-
-
-
+#### 🔌 Usage
-
-**sampling_temperature:** `typing.Optional[float]`
-
-
-
-
-
-**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3email_face_inpainting_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
@@ -1834,7 +1679,7 @@ client.llm()
-client.rag(...)
+client.post_v3face_inpainting_async()
-
@@ -1852,9 +1697,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.rag(
- search_query="search_query",
-)
+client.post_v3face_inpainting_async()
```
@@ -1870,192 +1713,187 @@ client.rag(
-
-**search_query:** `str`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
--
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
+
+client.post_v3google_image_gen_async()
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
+#### 🔌 Usage
-
-**keyword_query:** `typing.Optional[DocSearchPageRequestKeywordQuery]`
-
-
-
-
-
-**documents:** `typing.Optional[typing.Sequence[str]]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3google_image_gen_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**max_references:** `typing.Optional[int]`
-
-
-
-
-
-**max_context_words:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**scroll_jump:** `typing.Optional[int]`
-
+
+client.post_v3image_segmentation_async()
-
-**doc_extract_url:** `typing.Optional[str]`
-
-
-
+#### 🔌 Usage
-
-**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]`
-
-
-
-
-
-**dense_weight:** `typing.Optional[float]`
+```python
+from gooey import Gooey
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3image_segmentation_async()
-Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
-Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
+```
+
+
+#### ⚙️ Parameters
+
-
-**task_instructions:** `typing.Optional[str]`
-
-
-
-
-
-**query_instructions:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**selected_model:** `typing.Optional[DocSearchPageRequestSelectedModel]`
-
+
+client.post_v3img2img_async()
-
-**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]`
-
-
-
+#### 🔌 Usage
-
-**avoid_repetition:** `typing.Optional[bool]`
-
-
-
-
-
-**num_outputs:** `typing.Optional[int]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3img2img_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**quality:** `typing.Optional[float]`
-
-
-
-
-
-**max_tokens:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**sampling_temperature:** `typing.Optional[float]`
-
+
+client.post_v3letter_writer_async()
-
-**response_format_type:** `typing.Optional[DocSearchPageRequestResponseFormatType]`
-
-
-
+#### 🔌 Usage
-
-**settings:** `typing.Optional[RunSettings]`
-
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3letter_writer_async()
+
+```
+
+
+#### ⚙️ Parameters
+
+
+-
+
-
@@ -2071,7 +1909,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-client.doc_summary(...)
+client.post_v3lipsync_async()
-
@@ -2089,9 +1927,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.doc_summary(
- documents=["documents"],
-)
+client.post_v3lipsync_async()
```
@@ -2107,139 +1943,141 @@ client.doc_summary(
-
-**documents:** `typing.Sequence[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**example_id:** `typing.Optional[str]`
-
+
+client.post_v3lipsync_tts_async()
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
+#### 🔌 Usage
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
-**task_instructions:** `typing.Optional[str]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3lipsync_tts_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**merge_instructions:** `typing.Optional[str]`
-
-
-
-
-
-**selected_model:** `typing.Optional[DocSummaryPageRequestSelectedModel]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]`
-
+
+client.post_v3object_inpainting_async()
-
-**selected_asr_model:** `typing.Optional[DocSummaryPageRequestSelectedAsrModel]`
-
-
-
+#### 🔌 Usage
-
-**google_translate_target:** `typing.Optional[str]`
-
-
-
-
-
-**avoid_repetition:** `typing.Optional[bool]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3object_inpainting_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
-**quality:** `typing.Optional[float]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**max_tokens:** `typing.Optional[int]`
-
+
+client.post_v3seo_summary_async()
-
-**sampling_temperature:** `typing.Optional[float]`
-
-
-
+#### 🔌 Usage
-
-**response_format_type:** `typing.Optional[DocSummaryPageRequestResponseFormatType]`
-
-
-
-
-
-**settings:** `typing.Optional[RunSettings]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3seo_summary_async()
+
+```
+
+
+#### ⚙️ Parameters
+
+
+-
+
-
@@ -2255,7 +2093,7 @@ client.doc_summary(
-client.lipsync_tts(...)
+client.post_v3smart_gpt_async()
-
@@ -2273,9 +2111,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.lipsync_tts(
- text_prompt="text_prompt",
-)
+client.post_v3smart_gpt_async()
```
@@ -2291,243 +2127,233 @@ client.lipsync_tts(
-
-**text_prompt:** `str`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
--
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
+
+client.post_v3social_lookup_email_async()
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
+#### 🔌 Usage
-
-**tts_provider:** `typing.Optional[LipsyncTtsPageRequestTtsProvider]`
-
-
-
-
-
-**uberduck_voice_name:** `typing.Optional[str]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3social_lookup_email_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**uberduck_speaking_rate:** `typing.Optional[float]`
-
-
-
-
-
-**google_voice_name:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**google_speaking_rate:** `typing.Optional[float]`
-
+
+client.post_v3text_to_speech_async()
-
-**google_pitch:** `typing.Optional[float]`
-
-
-
+#### 🔌 Usage
-
-**bark_history_prompt:** `typing.Optional[str]`
-
-
-
-
-
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3text_to_speech_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**elevenlabs_api_key:** `typing.Optional[str]`
-
-
-
-
-
-**elevenlabs_voice_id:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**elevenlabs_model:** `typing.Optional[str]`
-
+
+client.post_v3art_qr_code_async()
-
-**elevenlabs_stability:** `typing.Optional[float]`
-
-
-
+#### 🔌 Usage
-
-**elevenlabs_similarity_boost:** `typing.Optional[float]`
-
-
-
-
-
-**elevenlabs_style:** `typing.Optional[float]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3art_qr_code_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**elevenlabs_speaker_boost:** `typing.Optional[bool]`
-
-
-
-
-
-**azure_voice_name:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**openai_voice_name:** `typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]`
-
+
+client.post_v3asr_async()
-
-**openai_tts_model:** `typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]`
-
-
-
+#### 🔌 Usage
-
-**input_face:** `typing.Optional[str]`
-
-
-
-
-
-**face_padding_top:** `typing.Optional[int]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3asr_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**face_padding_bottom:** `typing.Optional[int]`
-
-
-
-
-
-**face_padding_left:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**face_padding_right:** `typing.Optional[int]`
-
+
+client.post_v3bulk_eval_async()
-
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
-
-
-
+#### 🔌 Usage
-
-**selected_model:** `typing.Optional[LipsyncTtsPageRequestSelectedModel]`
-
-
-
-
-
-**settings:** `typing.Optional[RunSettings]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3bulk_eval_async()
+
+```
+
+
+#### ⚙️ Parameters
+
+
+-
+
-
@@ -2543,7 +2369,7 @@ client.lipsync_tts(
-client.text_to_speech(...)
+client.post_v3bulk_runner_async()
-
@@ -2561,9 +2387,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.text_to_speech(
- text_prompt="text_prompt",
-)
+client.post_v3bulk_runner_async()
```
@@ -2579,187 +2403,187 @@ client.text_to_speech(
-
-**text_prompt:** `str`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**example_id:** `typing.Optional[str]`
-
+
+client.post_v3compare_ai_upscalers_async()
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
+#### 🔌 Usage
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
-**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3compare_ai_upscalers_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**uberduck_voice_name:** `typing.Optional[str]`
-
-
-
-
-
-**uberduck_speaking_rate:** `typing.Optional[float]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**google_voice_name:** `typing.Optional[str]`
-
+
+client.post_v3doc_extract_async()
-
-**google_speaking_rate:** `typing.Optional[float]`
-
-
-
+#### 🔌 Usage
-
-**google_pitch:** `typing.Optional[float]`
-
-
-
-
-
-**bark_history_prompt:** `typing.Optional[str]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3doc_extract_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
-
-
-
-
-
-**elevenlabs_api_key:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**elevenlabs_voice_id:** `typing.Optional[str]`
-
+
+client.post_v3doc_search_async()
-
-**elevenlabs_model:** `typing.Optional[str]`
-
-
-
+#### 🔌 Usage
-
-**elevenlabs_stability:** `typing.Optional[float]`
-
-
-
-
-
-**elevenlabs_similarity_boost:** `typing.Optional[float]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3doc_search_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**elevenlabs_style:** `typing.Optional[float]`
-
-
-
-
-
-**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**azure_voice_name:** `typing.Optional[str]`
-
+
+client.post_v3doc_summary_async()
-
-**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]`
-
-
-
+#### 🔌 Usage
-
-**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]`
-
-
-
-
-
-**settings:** `typing.Optional[RunSettings]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3doc_summary_async()
+
+```
+
+
+#### ⚙️ Parameters
+
+
+-
+
-
@@ -2775,7 +2599,7 @@ client.text_to_speech(
-client.speech_recognition(...)
+client.post_v3embeddings_async()
-
@@ -2793,9 +2617,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.speech_recognition(
- documents=["documents"],
-)
+client.post_v3embeddings_async()
```
@@ -2811,110 +2633,49 @@ client.speech_recognition(
-
-**documents:** `typing.Sequence[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**example_id:** `typing.Optional[str]`
-
+
+client.post_v3functions_async()
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
+#### 🔌 Usage
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**selected_model:** `typing.Optional[AsrPageRequestSelectedModel]`
-
-
-
-
-
--
-
-**language:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**translation_model:** `typing.Optional[AsrPageRequestTranslationModel]`
-
-
-
-
-
--
-
-**output_format:** `typing.Optional[AsrPageRequestOutputFormat]`
-
-
-
-
-
-**google_translate_target:** `typing.Optional[str]` — use `translation_model` & `translation_target` instead.
-
-
-
+```python
+from gooey import Gooey
-
--
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3functions_async()
-**translation_source:** `typing.Optional[str]`
-
+```
-
-
--
-
-**translation_target:** `typing.Optional[str]`
-
-
--
-
-**glossary_document:** `typing.Optional[str]`
-
-Provide a glossary to customize translation and improve accuracy of domain-specific terms.
-If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
-
-
-
+#### ⚙️ Parameters
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
@@ -2930,7 +2691,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-client.text_to_music(...)
+client.post_v3google_gpt_async()
-
@@ -2948,9 +2709,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.text_to_music(
- text_prompt="text_prompt",
-)
+client.post_v3google_gpt_async()
```
@@ -2966,110 +2725,6 @@ client.text_to_music(
-
-**text_prompt:** `str`
-
-
-
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**negative_prompt:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**duration_sec:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**quality:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**guidance_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**seed:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**sd2upscaling:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]`
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3082,7 +2737,7 @@ client.text_to_music(
-client.translate(...)
+client.post_v3related_qna_maker_doc_async()
-
@@ -3100,7 +2755,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.translate()
+client.post_v3related_qna_maker_doc_async()
```
@@ -3116,81 +2771,6 @@ client.translate()
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**texts:** `typing.Optional[typing.Sequence[str]]`
-
-
-
-
-
--
-
-**selected_model:** `typing.Optional[TranslationPageRequestSelectedModel]`
-
-
-
-
-
--
-
-**translation_source:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**translation_target:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**glossary_document:** `typing.Optional[str]`
-
-Provide a glossary to customize translation and improve accuracy of domain-specific terms.
-If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3203,7 +2783,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-client.remix_image(...)
+client.post_v3related_qna_maker_async()
-
@@ -3221,9 +2801,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.remix_image(
- input_image="input_image",
-)
+client.post_v3related_qna_maker_async()
```
@@ -3239,2838 +2817,194 @@ client.remix_image(
-
-**input_image:** `str`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
--
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
+
+client.post_v3text2audio_async()
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
+#### 🔌 Usage
-
-**text_prompt:** `typing.Optional[str]`
-
-
-
-
-
-**selected_model:** `typing.Optional[Img2ImgPageRequestSelectedModel]`
-
-
-
-
-
--
-
-**selected_controlnet_model:** `typing.Optional[Img2ImgPageRequestSelectedControlnetModel]`
-
-
-
-
-
--
-
-**negative_prompt:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**quality:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**output_width:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**output_height:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**guidance_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**prompt_strength:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
-
-
-
-
-
--
-
-**seed:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**image_guidance_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.text_to_image(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.text_to_image(
- text_prompt="text_prompt",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**text_prompt:** `str`
-
-
-
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**negative_prompt:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**output_width:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**output_height:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**quality:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**dall_e3quality:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**dall_e3style:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**guidance_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**seed:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**sd2upscaling:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]`
-
-
-
-
-
--
-
-**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]`
-
-
-
-
-
--
-
-**edit_instruction:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**image_guidance_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.product_image(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.product_image(
- input_image="input_image",
- text_prompt="text_prompt",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**input_image:** `str`
-
-
-
-
-
--
-
-**text_prompt:** `str`
-
-
-
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**obj_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**obj_pos_x:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**obj_pos_y:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**mask_threshold:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**selected_model:** `typing.Optional[ObjectInpaintingPageRequestSelectedModel]`
-
-
-
-
-
--
-
-**negative_prompt:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**quality:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**output_width:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**output_height:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**guidance_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**sd2upscaling:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**seed:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.portrait(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.portrait(
- input_image="input_image",
- text_prompt="tony stark from the iron man",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**input_image:** `str`
-
-
-
-
-
--
-
-**text_prompt:** `str`
-
-
-
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**face_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**face_pos_x:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**face_pos_y:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**selected_model:** `typing.Optional[FaceInpaintingPageRequestSelectedModel]`
-
-
-
-
-
--
-
-**negative_prompt:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**quality:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**upscale_factor:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**output_width:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**output_height:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**guidance_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**seed:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.image_from_email(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.image_from_email(
- email_address="sean@dara.network",
- text_prompt="winter's day in paris",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**text_prompt:** `str`
-
-
-
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**email_address:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**twitter_handle:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**face_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**face_pos_x:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**face_pos_y:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]`
-
-
-
-
-
--
-
-**negative_prompt:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**quality:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**upscale_factor:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**output_width:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**output_height:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**guidance_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**should_send_email:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**email_from:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**email_cc:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**email_bcc:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**email_subject:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**email_body:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**email_body_enable_html:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**fallback_email_body:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**seed:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.image_from_web_search(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.image_from_web_search(
- search_query="search_query",
- text_prompt="text_prompt",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**search_query:** `str`
-
-
-
-
-
--
-
-**text_prompt:** `str`
-
-
-
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
-
-
-
-
-
--
-
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
-
-
-
-
-
--
-
-**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]`
-
-
-
-
-
--
-
-**negative_prompt:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**quality:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**guidance_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**prompt_strength:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**sd2upscaling:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**seed:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**image_guidance_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.remove_background(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.remove_background(
- input_image="input_image",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**input_image:** `str`
-
-
-
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**selected_model:** `typing.Optional[ImageSegmentationPageRequestSelectedModel]`
-
-
-
-
-
--
-
-**mask_threshold:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**rect_persepective_transform:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**reflection_opacity:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**obj_scale:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**obj_pos_x:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**obj_pos_y:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.upscale(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.upscale(
- scale=1,
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**scale:** `int` — The final upsampling scale of the image
-
-
-
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**input_image:** `typing.Optional[str]` — Input Image
-
-
-
-
-
--
-
-**input_video:** `typing.Optional[str]` — Input Video
-
-
-
-
-
--
-
-**selected_models:** `typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]`
-
-
-
-
-
--
-
-**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]`
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.embed(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.embed(
- texts=["texts"],
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**texts:** `typing.Sequence[str]`
-
-
-
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]`
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.seo_people_also_ask_doc(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.seo_people_also_ask_doc(
- search_query="search_query",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**search_query:** `str`
-
-
-
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]`
-
-
-
-
-
--
-
-**documents:** `typing.Optional[typing.Sequence[str]]`
-
-
-
-
-
--
-
-**max_references:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**max_context_words:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**scroll_jump:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**doc_extract_url:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]`
-
-
-
-
-
--
-
-**dense_weight:** `typing.Optional[float]`
-
-
-Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
-Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
-
-
-
-
--
-
-**task_instructions:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**query_instructions:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]`
-
-
-
-
-
--
-
-**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]`
-
-
-
-
-
--
-
-**avoid_repetition:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**quality:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**max_tokens:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**sampling_temperature:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**response_format_type:** `typing.Optional[RelatedQnADocPageRequestResponseFormatType]`
-
-
-
-
-
--
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
-
-
-
-
-
--
-
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
-
-
-
-
-
--
-
-**serp_search_type:** `typing.Optional[SerpSearchType]`
-
-
-
-
-
--
-
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
-
-
-
-
-
--
-
-**settings:** `typing.Optional[RunSettings]`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.health_status_get()
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.health_status_get()
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-## CopilotIntegrations
-client.copilot_integrations.video_bots_stream_create(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.copilot_integrations.video_bots_stream_create(
- integration_id="integration_id",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab
-
-
-
-
-
--
-
-**conversation_id:** `typing.Optional[str]`
-
-The gooey conversation ID.
-
-If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests.
-
-Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response.
-
-
-
-
-
--
-
-**user_id:** `typing.Optional[str]`
-
-Your app's custom user ID.
-
-If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation.
-
-
-
-
-
--
-
-**user_message_id:** `typing.Optional[str]`
-
-Your app's custom message ID for the user message.
-
-If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation.
-
-
-
-
-
--
-
-**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user.
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**input_prompt:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**input_audio:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**input_images:** `typing.Optional[typing.Sequence[str]]`
-
-
-
-
-
--
-
-**input_documents:** `typing.Optional[typing.Sequence[str]]`
-
-
-
-
-
--
-
-**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
-
-
-
-
-
--
-
-**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
-
-
-
-
-
--
-
-**bot_script:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]`
-
-
-
-
-
--
-
-**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
-
-
-
-
-
--
-
-**task_instructions:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**query_instructions:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**keyword_instructions:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**documents:** `typing.Optional[typing.Sequence[str]]`
-
-
-
-
-
--
-
-**max_references:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**max_context_words:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**scroll_jump:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]`
-
-
-
-
-
--
-
-**dense_weight:** `typing.Optional[float]`
-
-
-Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
-Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
-
-
-
-
--
-
-**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]`
-
-
-
-
-
--
-
-**use_url_shortener:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
-
-
-
-
-
--
-
-**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
-
-
-
-
-
--
-
-**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]`
-
-
-
-
-
--
-
-**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
-
-
-
-
-
--
-
-**input_glossary_document:** `typing.Optional[str]`
-
-
-Translation Glossary for User Langauge -> LLM Language (English)
-
-
-
-
-
-
--
-
-**output_glossary_document:** `typing.Optional[str]`
-
-
-Translation Glossary for LLM Language (English) -> User Langauge
-
-
-
-
-
-
--
-
-**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]`
-
-
-
-
-
--
-
-**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
-
-
-
-
-
--
-
-**avoid_repetition:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**num_outputs:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**quality:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**max_tokens:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**sampling_temperature:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**response_format_type:** `typing.Optional[CreateStreamRequestResponseFormatType]`
-
-
-
-
-
--
-
-**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]`
-
-
-
-
-
--
-
-**uberduck_voice_name:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**uberduck_speaking_rate:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**google_voice_name:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**google_speaking_rate:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**google_pitch:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**bark_history_prompt:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
-
-
-
-
-
--
-
-**elevenlabs_api_key:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**elevenlabs_voice_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**elevenlabs_model:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**elevenlabs_stability:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**elevenlabs_similarity_boost:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**elevenlabs_style:** `typing.Optional[float]`
-
-
-
-
-
--
-
-**elevenlabs_speaker_boost:** `typing.Optional[bool]`
-
-
-
-
-
--
-
-**azure_voice_name:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]`
-
-
-
-
-
--
-
-**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]`
-
-
-
-
-
--
-
-**input_face:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**face_padding_top:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**face_padding_bottom:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**face_padding_left:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**face_padding_right:** `typing.Optional[int]`
-
-
-
-
-
--
-
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
-
-
-
-
-
--
-
-**input_text:** `typing.Optional[str]` — Use `input_prompt` instead
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.copilot_integrations.video_bots_stream(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.copilot_integrations.video_bots_stream(
- request_id="request_id",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**request_id:** `str`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-## CopilotForYourEnterprise
-client.copilot_for_your_enterprise.async_video_bots(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from gooey import Gooey
+```python
+from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.copilot_for_your_enterprise.async_video_bots()
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**example_id:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
-
-
-
-
-
--
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
-
-
-
--
-
-**input_prompt:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**input_audio:** `typing.Optional[str]`
-
-
-
-
-
--
-
-**input_images:** `typing.Optional[typing.Sequence[str]]`
-
-
-
-
-
--
-
-**input_documents:** `typing.Optional[typing.Sequence[str]]`
-
-
-
-
-
--
-
-**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
-
-
-
-
-
--
+client.post_v3text2audio_async()
-**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
-
+```
+
+
+#### ⚙️ Parameters
+
-
-**bot_script:** `typing.Optional[str]`
-
-
-
-
-
-**selected_model:** `typing.Optional[VideoBotsPageRequestSelectedModel]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
-
+
+client.post_v3translate_async()
-
-**task_instructions:** `typing.Optional[str]`
-
-
-
+#### 🔌 Usage
-
-**query_instructions:** `typing.Optional[str]`
-
-
-
-
-
-**keyword_instructions:** `typing.Optional[str]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3translate_async()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**documents:** `typing.Optional[typing.Sequence[str]]`
-
-
-
-
-
-**max_references:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**max_context_words:** `typing.Optional[int]`
-
+
+client.post_v3video_bots_async()
-
-**scroll_jump:** `typing.Optional[int]`
-
-
-
+#### 🔌 Usage
-
-**embedding_model:** `typing.Optional[VideoBotsPageRequestEmbeddingModel]`
-
-
-
-
-
-**dense_weight:** `typing.Optional[float]`
+```python
+from gooey import Gooey
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3video_bots_async()
-Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
-Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
+```
-
-
--
-
-**citation_style:** `typing.Optional[VideoBotsPageRequestCitationStyle]`
-
+#### ⚙️ Parameters
+
-
-**use_url_shortener:** `typing.Optional[bool]`
-
-
-
-
-
-**asr_model:** `typing.Optional[VideoBotsPageRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
-
-
--
-**translation_model:** `typing.Optional[VideoBotsPageRequestTranslationModel]`
-
+
+## CopilotIntegrations
+client.copilot_integrations.video_bots_stream_create(...)
-
-**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
-
-
-
+#### 🔌 Usage
-
-**input_glossary_document:** `typing.Optional[str]`
-
-
-Translation Glossary for User Langauge -> LLM Language (English)
-
-
-
-
-
-
-**output_glossary_document:** `typing.Optional[str]`
+```python
+from gooey import Gooey
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.copilot_integrations.video_bots_stream_create(
+ integration_id="integration_id",
+)
-Translation Glossary for LLM Language (English) -> User Langauge
-
-
+```
-
-
--
-
-**lipsync_model:** `typing.Optional[VideoBotsPageRequestLipsyncModel]`
-
+#### ⚙️ Parameters
+
-
-**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
-
-
-
-
-
-**avoid_repetition:** `typing.Optional[bool]`
+**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab
@@ -6078,7 +3012,13 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**num_outputs:** `typing.Optional[int]`
+**conversation_id:** `typing.Optional[str]`
+
+The gooey conversation ID.
+
+If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests.
+
+Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response.
@@ -6086,7 +3026,11 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**quality:** `typing.Optional[float]`
+**user_id:** `typing.Optional[str]`
+
+Your app's custom user ID.
+
+If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation.
@@ -6094,7 +3038,11 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**max_tokens:** `typing.Optional[int]`
+**user_message_id:** `typing.Optional[str]`
+
+Your app's custom message ID for the user message.
+
+If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation.
@@ -6102,7 +3050,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**sampling_temperature:** `typing.Optional[float]`
+**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user.
@@ -6110,7 +3058,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**response_format_type:** `typing.Optional[VideoBotsPageRequestResponseFormatType]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -6118,7 +3066,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**tts_provider:** `typing.Optional[VideoBotsPageRequestTtsProvider]`
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -6126,7 +3074,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**uberduck_voice_name:** `typing.Optional[str]`
+**input_prompt:** `typing.Optional[str]`
@@ -6134,7 +3082,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**uberduck_speaking_rate:** `typing.Optional[float]`
+**input_audio:** `typing.Optional[str]`
@@ -6142,7 +3090,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**google_voice_name:** `typing.Optional[str]`
+**input_images:** `typing.Optional[typing.Sequence[str]]`
@@ -6150,7 +3098,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**google_speaking_rate:** `typing.Optional[float]`
+**input_documents:** `typing.Optional[typing.Sequence[str]]`
@@ -6158,7 +3106,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**google_pitch:** `typing.Optional[float]`
+**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
@@ -6166,7 +3114,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**bark_history_prompt:** `typing.Optional[str]`
+**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
@@ -6174,7 +3122,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+**bot_script:** `typing.Optional[str]`
@@ -6182,7 +3130,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**elevenlabs_api_key:** `typing.Optional[str]`
+**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]`
@@ -6190,7 +3138,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**elevenlabs_voice_id:** `typing.Optional[str]`
+**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
@@ -6198,7 +3146,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**elevenlabs_model:** `typing.Optional[str]`
+**task_instructions:** `typing.Optional[str]`
@@ -6206,7 +3154,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**elevenlabs_stability:** `typing.Optional[float]`
+**query_instructions:** `typing.Optional[str]`
@@ -6214,7 +3162,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**elevenlabs_similarity_boost:** `typing.Optional[float]`
+**keyword_instructions:** `typing.Optional[str]`
@@ -6222,7 +3170,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**elevenlabs_style:** `typing.Optional[float]`
+**documents:** `typing.Optional[typing.Sequence[str]]`
@@ -6230,7 +3178,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+**max_references:** `typing.Optional[int]`
@@ -6238,7 +3186,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**azure_voice_name:** `typing.Optional[str]`
+**max_context_words:** `typing.Optional[int]`
@@ -6246,7 +3194,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**openai_voice_name:** `typing.Optional[VideoBotsPageRequestOpenaiVoiceName]`
+**scroll_jump:** `typing.Optional[int]`
@@ -6254,7 +3202,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**openai_tts_model:** `typing.Optional[VideoBotsPageRequestOpenaiTtsModel]`
+**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]`
@@ -6262,7 +3210,12 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**input_face:** `typing.Optional[str]`
+**dense_weight:** `typing.Optional[float]`
+
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
@@ -6270,7 +3223,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**face_padding_top:** `typing.Optional[int]`
+**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]`
@@ -6278,7 +3231,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**face_padding_bottom:** `typing.Optional[int]`
+**use_url_shortener:** `typing.Optional[bool]`
@@ -6286,7 +3239,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**face_padding_left:** `typing.Optional[int]`
+**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
@@ -6294,7 +3247,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**face_padding_right:** `typing.Optional[int]`
+**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
@@ -6302,7 +3255,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]`
@@ -6310,7 +3263,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**settings:** `typing.Optional[RunSettings]`
+**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -6318,62 +3271,39 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
+**input_glossary_document:** `typing.Optional[str]`
+Translation Glossary for User Langauge -> LLM Language (English)
+
+
-
-
-## Evaluator
-client.evaluator.async_bulk_eval(...)
-
--
-
-#### 🔌 Usage
-
-
--
-
-```python
-from gooey import Gooey
+**output_glossary_document:** `typing.Optional[str]`
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.evaluator.async_bulk_eval(
- documents=["documents"],
-)
-```
-
-
+Translation Glossary for LLM Language (English) -> User Langauge
+
+
-#### ⚙️ Parameters
-
-
+**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]`
+
+
+
+
-
-**documents:** `typing.Sequence[str]`
-
-
-Upload or link to a CSV or google sheet that contains your sample input data.
-For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
-Remember to includes header names in your CSV too.
-
+**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
@@ -6381,7 +3311,7 @@ Remember to includes header names in your CSV too.
-
-**example_id:** `typing.Optional[str]`
+**avoid_repetition:** `typing.Optional[bool]`
@@ -6389,7 +3319,7 @@ Remember to includes header names in your CSV too.
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**num_outputs:** `typing.Optional[int]`
@@ -6397,7 +3327,7 @@ Remember to includes header names in your CSV too.
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+**quality:** `typing.Optional[float]`
@@ -6405,12 +3335,7 @@ Remember to includes header names in your CSV too.
-
-**eval_prompts:** `typing.Optional[typing.Sequence[EvalPrompt]]`
-
-
-Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
-_The `columns` dictionary can be used to reference the spreadsheet columns._
-
+**max_tokens:** `typing.Optional[int]`
@@ -6418,11 +3343,7 @@ _The `columns` dictionary can be used to reference the spreadsheet columns._
-
-**agg_functions:** `typing.Optional[typing.Sequence[AggFunction]]`
-
-
-Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
-
+**sampling_temperature:** `typing.Optional[float]`
@@ -6430,7 +3351,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
-**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]`
+**response_format_type:** `typing.Optional[CreateStreamRequestResponseFormatType]`
@@ -6438,7 +3359,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
-**avoid_repetition:** `typing.Optional[bool]`
+**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]`
@@ -6446,7 +3367,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
-**num_outputs:** `typing.Optional[int]`
+**uberduck_voice_name:** `typing.Optional[str]`
@@ -6454,7 +3375,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
-**quality:** `typing.Optional[float]`
+**uberduck_speaking_rate:** `typing.Optional[float]`
@@ -6462,7 +3383,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
-**max_tokens:** `typing.Optional[int]`
+**google_voice_name:** `typing.Optional[str]`
@@ -6470,7 +3391,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
-**sampling_temperature:** `typing.Optional[float]`
+**google_speaking_rate:** `typing.Optional[float]`
@@ -6478,7 +3399,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
-**response_format_type:** `typing.Optional[BulkEvalPageRequestResponseFormatType]`
+**google_pitch:** `typing.Optional[float]`
@@ -6486,7 +3407,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
-**settings:** `typing.Optional[RunSettings]`
+**bark_history_prompt:** `typing.Optional[str]`
@@ -6494,56 +3415,55 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
-
-
+
+-
+**elevenlabs_api_key:** `typing.Optional[str]`
+
-
-## SmartGpt
-client.smart_gpt.async_smart_gpt(...)
-
-#### 🔌 Usage
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
-
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.smart_gpt.async_smart_gpt(
- input_prompt="input_prompt",
-)
-
-```
-
-
+**elevenlabs_stability:** `typing.Optional[float]`
+
-#### ⚙️ Parameters
-
-
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
-
-**input_prompt:** `str`
+**elevenlabs_style:** `typing.Optional[float]`
@@ -6551,7 +3471,7 @@ client.smart_gpt.async_smart_gpt(
-
-**example_id:** `typing.Optional[str]`
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
@@ -6559,7 +3479,7 @@ client.smart_gpt.async_smart_gpt(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**azure_voice_name:** `typing.Optional[str]`
@@ -6567,7 +3487,7 @@ client.smart_gpt.async_smart_gpt(
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]`
@@ -6575,7 +3495,7 @@ client.smart_gpt.async_smart_gpt(
-
-**cot_prompt:** `typing.Optional[str]`
+**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]`
@@ -6583,7 +3503,7 @@ client.smart_gpt.async_smart_gpt(
-
-**reflexion_prompt:** `typing.Optional[str]`
+**input_face:** `typing.Optional[str]`
@@ -6591,7 +3511,7 @@ client.smart_gpt.async_smart_gpt(
-
-**dera_prompt:** `typing.Optional[str]`
+**face_padding_top:** `typing.Optional[int]`
@@ -6599,7 +3519,7 @@ client.smart_gpt.async_smart_gpt(
-
-**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]`
+**face_padding_bottom:** `typing.Optional[int]`
@@ -6607,7 +3527,7 @@ client.smart_gpt.async_smart_gpt(
-
-**avoid_repetition:** `typing.Optional[bool]`
+**face_padding_left:** `typing.Optional[int]`
@@ -6615,7 +3535,7 @@ client.smart_gpt.async_smart_gpt(
-
-**num_outputs:** `typing.Optional[int]`
+**face_padding_right:** `typing.Optional[int]`
@@ -6623,7 +3543,7 @@ client.smart_gpt.async_smart_gpt(
-
-**quality:** `typing.Optional[float]`
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
@@ -6631,7 +3551,7 @@ client.smart_gpt.async_smart_gpt(
-
-**max_tokens:** `typing.Optional[int]`
+**input_text:** `typing.Optional[str]` — Use `input_prompt` instead
@@ -6639,23 +3559,55 @@ client.smart_gpt.async_smart_gpt(
-
-**sampling_temperature:** `typing.Optional[float]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.copilot_integrations.video_bots_stream(...)
-
-**response_format_type:** `typing.Optional[SmartGptPageRequestResponseFormatType]`
-
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.copilot_integrations.video_bots_stream(
+ request_id="request_id",
+)
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**settings:** `typing.Optional[RunSettings]`
+
+-
+
+**request_id:** `str`
@@ -6675,7 +3627,8 @@ client.smart_gpt.async_smart_gpt(
-client.smart_gpt.post()
+## CopilotForYourEnterprise
+client.copilot_for_your_enterprise.async_form_video_bots(...)
-
@@ -6693,7 +3646,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.smart_gpt.post()
+client.copilot_for_your_enterprise.async_form_video_bots()
```
@@ -6709,6 +3662,14 @@ client.smart_gpt.post()
-
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -6721,8 +3682,8 @@ client.smart_gpt.post()
-## Functions
-client.functions.async_functions(...)
+## Evaluator
+client.evaluator.async_form_bulk_eval(...)
-
@@ -6740,7 +3701,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.functions.async_functions()
+client.evaluator.async_form_bulk_eval()
```
@@ -6764,23 +3725,54 @@ client.functions.async_functions()
-
-**code:** `typing.Optional[str]` — The JS code to be executed.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+## SmartGpt
+client.smart_gpt.async_form_smart_gpt(...)
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used in the code
-
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.smart_gpt.async_form_smart_gpt()
+
+```
+
+
+#### ⚙️ Parameters
+
+
+-
+
-
-**settings:** `typing.Optional[RunSettings]`
+**example_id:** `typing.Optional[str]`
@@ -6800,7 +3792,7 @@ client.functions.async_functions()
-client.functions.post()
+client.smart_gpt.post()
-
@@ -6818,7 +3810,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.functions.post()
+client.smart_gpt.post()
```
@@ -6846,8 +3838,8 @@ client.functions.post()
-## LipSyncing
-client.lip_syncing.async_lipsync(...)
+## Functions
+client.functions.async_form_functions(...)
-
@@ -6865,7 +3857,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.lip_syncing.async_lipsync()
+client.functions.async_form_functions()
```
@@ -6889,87 +3881,100 @@ client.lip_syncing.async_lipsync()
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
+
+client.functions.post()
-
-**input_face:** `typing.Optional[str]`
-
-
-
+#### 🔌 Usage
-
-**face_padding_top:** `typing.Optional[int]`
-
-
-
-
-
-**face_padding_bottom:** `typing.Optional[int]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.functions.post()
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**face_padding_left:** `typing.Optional[int]`
-
-
-
-
-
-**face_padding_right:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
-
+
+## LipSyncing
+client.lip_syncing.async_form_lipsync(...)
-
-**selected_model:** `typing.Optional[LipsyncPageRequestSelectedModel]`
-
-
-
+#### 🔌 Usage
-
-**input_audio:** `typing.Optional[str]`
-
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.lip_syncing.async_form_lipsync()
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**settings:** `typing.Optional[RunSettings]`
+
+-
+
+**example_id:** `typing.Optional[str]`
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py
index e9d8b46..116880c 100644
--- a/src/gooey/__init__.py
+++ b/src/gooey/__init__.py
@@ -10,6 +10,7 @@
AsrOutputJson,
AsrPageOutput,
AsrPageOutputOutputTextItem,
+ AsrPageRequest,
AsrPageRequestOutputFormat,
AsrPageRequestSelectedModel,
AsrPageRequestTranslationModel,
@@ -17,11 +18,48 @@
AsrPageStatusResponse,
AsyncApiResponseModelV3,
BalanceResponse,
+ BodyAsyncFormArtQrCode,
+ BodyAsyncFormAsr,
+ BodyAsyncFormBulkEval,
+ BodyAsyncFormBulkRunner,
+ BodyAsyncFormChyronPlant,
+ BodyAsyncFormCompareAiUpscalers,
+ BodyAsyncFormCompareLlm,
+ BodyAsyncFormCompareText2Img,
+ BodyAsyncFormDeforumSd,
+ BodyAsyncFormDocExtract,
+ BodyAsyncFormDocSearch,
+ BodyAsyncFormDocSummary,
+ BodyAsyncFormEmailFaceInpainting,
+ BodyAsyncFormEmbeddings,
+ BodyAsyncFormFaceInpainting,
+ BodyAsyncFormFunctions,
+ BodyAsyncFormGoogleGpt,
+ BodyAsyncFormGoogleImageGen,
+ BodyAsyncFormImageSegmentation,
+ BodyAsyncFormImg2Img,
+ BodyAsyncFormLetterWriter,
+ BodyAsyncFormLipsync,
+ BodyAsyncFormLipsyncTts,
+ BodyAsyncFormObjectInpainting,
+ BodyAsyncFormRelatedQnaMaker,
+ BodyAsyncFormRelatedQnaMakerDoc,
+ BodyAsyncFormSeoSummary,
+ BodyAsyncFormSmartGpt,
+ BodyAsyncFormSocialLookupEmail,
+ BodyAsyncFormText2Audio,
+ BodyAsyncFormTextToSpeech,
+ BodyAsyncFormTranslate,
+ BodyAsyncFormVideoBots,
BotBroadcastFilters,
BulkEvalPageOutput,
+ BulkEvalPageRequest,
+ BulkEvalPageRequestResponseFormatType,
+ BulkEvalPageRequestSelectedModel,
BulkEvalPageResponse,
BulkEvalPageStatusResponse,
BulkRunnerPageOutput,
+ BulkRunnerPageRequest,
BulkRunnerPageResponse,
BulkRunnerPageStatusResponse,
ButtonPressed,
@@ -34,16 +72,19 @@
ChyronPlantPageResponse,
ChyronPlantPageStatusResponse,
CompareLlmPageOutput,
+ CompareLlmPageRequest,
CompareLlmPageRequestResponseFormatType,
CompareLlmPageRequestSelectedModelsItem,
CompareLlmPageResponse,
CompareLlmPageStatusResponse,
CompareText2ImgPageOutput,
+ CompareText2ImgPageRequest,
CompareText2ImgPageRequestScheduler,
CompareText2ImgPageRequestSelectedModelsItem,
CompareText2ImgPageResponse,
CompareText2ImgPageStatusResponse,
CompareUpscalerPageOutput,
+ CompareUpscalerPageRequest,
CompareUpscalerPageRequestSelectedModelsItem,
CompareUpscalerPageResponse,
CompareUpscalerPageStatusResponse,
@@ -58,16 +99,19 @@
ConversationStart,
CreateStreamResponse,
DeforumSdPageOutput,
+ DeforumSdPageRequest,
DeforumSdPageRequestSelectedModel,
DeforumSdPageResponse,
DeforumSdPageStatusResponse,
DocExtractPageOutput,
+ DocExtractPageRequest,
DocExtractPageRequestResponseFormatType,
DocExtractPageRequestSelectedAsrModel,
DocExtractPageRequestSelectedModel,
DocExtractPageResponse,
DocExtractPageStatusResponse,
DocSearchPageOutput,
+ DocSearchPageRequest,
DocSearchPageRequestCitationStyle,
DocSearchPageRequestEmbeddingModel,
DocSearchPageRequestKeywordQuery,
@@ -76,21 +120,25 @@
DocSearchPageResponse,
DocSearchPageStatusResponse,
DocSummaryPageOutput,
+ DocSummaryPageRequest,
DocSummaryPageRequestResponseFormatType,
DocSummaryPageRequestSelectedAsrModel,
DocSummaryPageRequestSelectedModel,
DocSummaryPageResponse,
DocSummaryPageStatusResponse,
EmailFaceInpaintingPageOutput,
+ EmailFaceInpaintingPageRequest,
EmailFaceInpaintingPageRequestSelectedModel,
EmailFaceInpaintingPageResponse,
EmailFaceInpaintingPageStatusResponse,
EmbeddingsPageOutput,
+ EmbeddingsPageRequest,
EmbeddingsPageRequestSelectedModel,
EmbeddingsPageResponse,
EmbeddingsPageStatusResponse,
EvalPrompt,
FaceInpaintingPageOutput,
+ FaceInpaintingPageRequest,
FaceInpaintingPageRequestSelectedModel,
FaceInpaintingPageResponse,
FaceInpaintingPageStatusResponse,
@@ -98,28 +146,33 @@
FailedResponseDetail,
FinalResponse,
FunctionsPageOutput,
+ FunctionsPageRequest,
FunctionsPageResponse,
FunctionsPageStatusResponse,
GenericErrorResponse,
GenericErrorResponseDetail,
GoogleGptPageOutput,
+ GoogleGptPageRequest,
GoogleGptPageRequestEmbeddingModel,
GoogleGptPageRequestResponseFormatType,
GoogleGptPageRequestSelectedModel,
GoogleGptPageResponse,
GoogleGptPageStatusResponse,
GoogleImageGenPageOutput,
+ GoogleImageGenPageRequest,
GoogleImageGenPageRequestSelectedModel,
GoogleImageGenPageResponse,
GoogleImageGenPageStatusResponse,
HttpValidationError,
ImageSegmentationPageOutput,
+ ImageSegmentationPageRequest,
ImageSegmentationPageRequestSelectedModel,
ImageSegmentationPageResponse,
ImageSegmentationPageStatusResponse,
ImageUrl,
ImageUrlDetail,
Img2ImgPageOutput,
+ Img2ImgPageRequest,
Img2ImgPageRequestSelectedControlnetModel,
Img2ImgPageRequestSelectedControlnetModelItem,
Img2ImgPageRequestSelectedModel,
@@ -130,9 +183,12 @@
LetterWriterPageResponse,
LetterWriterPageStatusResponse,
LipsyncPageOutput,
+ LipsyncPageRequest,
+ LipsyncPageRequestSelectedModel,
LipsyncPageResponse,
LipsyncPageStatusResponse,
LipsyncTtsPageOutput,
+ LipsyncTtsPageRequest,
LipsyncTtsPageRequestOpenaiTtsModel,
LipsyncTtsPageRequestOpenaiVoiceName,
LipsyncTtsPageRequestSelectedModel,
@@ -142,12 +198,14 @@
LlmTools,
MessagePart,
ObjectInpaintingPageOutput,
+ ObjectInpaintingPageRequest,
ObjectInpaintingPageRequestSelectedModel,
ObjectInpaintingPageResponse,
ObjectInpaintingPageStatusResponse,
PromptTreeNode,
PromptTreeNodePrompt,
QrCodeGeneratorPageOutput,
+ QrCodeGeneratorPageRequest,
QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
QrCodeGeneratorPageRequestScheduler,
QrCodeGeneratorPageRequestSelectedControlnetModelItem,
@@ -160,6 +218,7 @@
RelatedDocSearchResponse,
RelatedGoogleGptResponse,
RelatedQnADocPageOutput,
+ RelatedQnADocPageRequest,
RelatedQnADocPageRequestCitationStyle,
RelatedQnADocPageRequestEmbeddingModel,
RelatedQnADocPageRequestKeywordQuery,
@@ -168,6 +227,7 @@
RelatedQnADocPageResponse,
RelatedQnADocPageStatusResponse,
RelatedQnAPageOutput,
+ RelatedQnAPageRequest,
RelatedQnAPageRequestEmbeddingModel,
RelatedQnAPageRequestResponseFormatType,
RelatedQnAPageRequestSelectedModel,
@@ -184,6 +244,7 @@
SadTalkerSettingsPreprocess,
SearchReference,
SeoSummaryPageOutput,
+ SeoSummaryPageRequest,
SeoSummaryPageRequestResponseFormatType,
SeoSummaryPageRequestSelectedModel,
SeoSummaryPageResponse,
@@ -191,18 +252,24 @@
SerpSearchLocation,
SerpSearchType,
SmartGptPageOutput,
+ SmartGptPageRequest,
+ SmartGptPageRequestResponseFormatType,
+ SmartGptPageRequestSelectedModel,
SmartGptPageResponse,
SmartGptPageStatusResponse,
SocialLookupEmailPageOutput,
+ SocialLookupEmailPageRequest,
SocialLookupEmailPageRequestResponseFormatType,
SocialLookupEmailPageRequestSelectedModel,
SocialLookupEmailPageResponse,
SocialLookupEmailPageStatusResponse,
StreamError,
Text2AudioPageOutput,
+ Text2AudioPageRequest,
Text2AudioPageResponse,
Text2AudioPageStatusResponse,
TextToSpeechPageOutput,
+ TextToSpeechPageRequest,
TextToSpeechPageRequestOpenaiTtsModel,
TextToSpeechPageRequestOpenaiVoiceName,
TextToSpeechPageRequestTtsProvider,
@@ -210,6 +277,7 @@
TextToSpeechPageStatusResponse,
TrainingDataModel,
TranslationPageOutput,
+ TranslationPageRequest,
TranslationPageRequestSelectedModel,
TranslationPageResponse,
TranslationPageStatusResponse,
@@ -219,10 +287,27 @@
VideoBotsPageOutput,
VideoBotsPageOutputFinalKeywordQuery,
VideoBotsPageOutputFinalPrompt,
+ VideoBotsPageRequest,
+ VideoBotsPageRequestAsrModel,
+ VideoBotsPageRequestCitationStyle,
+ VideoBotsPageRequestEmbeddingModel,
+ VideoBotsPageRequestLipsyncModel,
+ VideoBotsPageRequestOpenaiTtsModel,
+ VideoBotsPageRequestOpenaiVoiceName,
+ VideoBotsPageRequestResponseFormatType,
+ VideoBotsPageRequestSelectedModel,
+ VideoBotsPageRequestTranslationModel,
+ VideoBotsPageRequestTtsProvider,
VideoBotsPageResponse,
VideoBotsPageStatusResponse,
)
-from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError
+from .errors import (
+ BadRequestError,
+ InternalServerError,
+ PaymentRequiredError,
+ TooManyRequestsError,
+ UnprocessableEntityError,
+)
from . import (
bulk_runner,
copilot_for_your_enterprise,
@@ -235,18 +320,6 @@
smart_gpt,
)
from .client import AsyncGooey, Gooey
-from .copilot_for_your_enterprise import (
- VideoBotsPageRequestAsrModel,
- VideoBotsPageRequestCitationStyle,
- VideoBotsPageRequestEmbeddingModel,
- VideoBotsPageRequestLipsyncModel,
- VideoBotsPageRequestOpenaiTtsModel,
- VideoBotsPageRequestOpenaiVoiceName,
- VideoBotsPageRequestResponseFormatType,
- VideoBotsPageRequestSelectedModel,
- VideoBotsPageRequestTranslationModel,
- VideoBotsPageRequestTtsProvider,
-)
from .copilot_integrations import (
CreateStreamRequestAsrModel,
CreateStreamRequestCitationStyle,
@@ -261,9 +334,6 @@
VideoBotsStreamResponse,
)
from .environment import GooeyEnvironment
-from .evaluator import BulkEvalPageRequestResponseFormatType, BulkEvalPageRequestSelectedModel
-from .lip_syncing import LipsyncPageRequestSelectedModel
-from .smart_gpt import SmartGptPageRequestResponseFormatType, SmartGptPageRequestSelectedModel
from .version import __version__
__all__ = [
@@ -276,6 +346,7 @@
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
+ "AsrPageRequest",
"AsrPageRequestOutputFormat",
"AsrPageRequestSelectedModel",
"AsrPageRequestTranslationModel",
@@ -283,14 +354,50 @@
"AsrPageStatusResponse",
"AsyncApiResponseModelV3",
"AsyncGooey",
+ "BadRequestError",
"BalanceResponse",
+ "BodyAsyncFormArtQrCode",
+ "BodyAsyncFormAsr",
+ "BodyAsyncFormBulkEval",
+ "BodyAsyncFormBulkRunner",
+ "BodyAsyncFormChyronPlant",
+ "BodyAsyncFormCompareAiUpscalers",
+ "BodyAsyncFormCompareLlm",
+ "BodyAsyncFormCompareText2Img",
+ "BodyAsyncFormDeforumSd",
+ "BodyAsyncFormDocExtract",
+ "BodyAsyncFormDocSearch",
+ "BodyAsyncFormDocSummary",
+ "BodyAsyncFormEmailFaceInpainting",
+ "BodyAsyncFormEmbeddings",
+ "BodyAsyncFormFaceInpainting",
+ "BodyAsyncFormFunctions",
+ "BodyAsyncFormGoogleGpt",
+ "BodyAsyncFormGoogleImageGen",
+ "BodyAsyncFormImageSegmentation",
+ "BodyAsyncFormImg2Img",
+ "BodyAsyncFormLetterWriter",
+ "BodyAsyncFormLipsync",
+ "BodyAsyncFormLipsyncTts",
+ "BodyAsyncFormObjectInpainting",
+ "BodyAsyncFormRelatedQnaMaker",
+ "BodyAsyncFormRelatedQnaMakerDoc",
+ "BodyAsyncFormSeoSummary",
+ "BodyAsyncFormSmartGpt",
+ "BodyAsyncFormSocialLookupEmail",
+ "BodyAsyncFormText2Audio",
+ "BodyAsyncFormTextToSpeech",
+ "BodyAsyncFormTranslate",
+ "BodyAsyncFormVideoBots",
"BotBroadcastFilters",
"BulkEvalPageOutput",
+ "BulkEvalPageRequest",
"BulkEvalPageRequestResponseFormatType",
"BulkEvalPageRequestSelectedModel",
"BulkEvalPageResponse",
"BulkEvalPageStatusResponse",
"BulkRunnerPageOutput",
+ "BulkRunnerPageRequest",
"BulkRunnerPageResponse",
"BulkRunnerPageStatusResponse",
"ButtonPressed",
@@ -303,16 +410,19 @@
"ChyronPlantPageResponse",
"ChyronPlantPageStatusResponse",
"CompareLlmPageOutput",
+ "CompareLlmPageRequest",
"CompareLlmPageRequestResponseFormatType",
"CompareLlmPageRequestSelectedModelsItem",
"CompareLlmPageResponse",
"CompareLlmPageStatusResponse",
"CompareText2ImgPageOutput",
+ "CompareText2ImgPageRequest",
"CompareText2ImgPageRequestScheduler",
"CompareText2ImgPageRequestSelectedModelsItem",
"CompareText2ImgPageResponse",
"CompareText2ImgPageStatusResponse",
"CompareUpscalerPageOutput",
+ "CompareUpscalerPageRequest",
"CompareUpscalerPageRequestSelectedModelsItem",
"CompareUpscalerPageResponse",
"CompareUpscalerPageStatusResponse",
@@ -337,16 +447,19 @@
"CreateStreamRequestTtsProvider",
"CreateStreamResponse",
"DeforumSdPageOutput",
+ "DeforumSdPageRequest",
"DeforumSdPageRequestSelectedModel",
"DeforumSdPageResponse",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
+ "DocExtractPageRequest",
"DocExtractPageRequestResponseFormatType",
"DocExtractPageRequestSelectedAsrModel",
"DocExtractPageRequestSelectedModel",
"DocExtractPageResponse",
"DocExtractPageStatusResponse",
"DocSearchPageOutput",
+ "DocSearchPageRequest",
"DocSearchPageRequestCitationStyle",
"DocSearchPageRequestEmbeddingModel",
"DocSearchPageRequestKeywordQuery",
@@ -355,21 +468,25 @@
"DocSearchPageResponse",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
+ "DocSummaryPageRequest",
"DocSummaryPageRequestResponseFormatType",
"DocSummaryPageRequestSelectedAsrModel",
"DocSummaryPageRequestSelectedModel",
"DocSummaryPageResponse",
"DocSummaryPageStatusResponse",
"EmailFaceInpaintingPageOutput",
+ "EmailFaceInpaintingPageRequest",
"EmailFaceInpaintingPageRequestSelectedModel",
"EmailFaceInpaintingPageResponse",
"EmailFaceInpaintingPageStatusResponse",
"EmbeddingsPageOutput",
+ "EmbeddingsPageRequest",
"EmbeddingsPageRequestSelectedModel",
"EmbeddingsPageResponse",
"EmbeddingsPageStatusResponse",
"EvalPrompt",
"FaceInpaintingPageOutput",
+ "FaceInpaintingPageRequest",
"FaceInpaintingPageRequestSelectedModel",
"FaceInpaintingPageResponse",
"FaceInpaintingPageStatusResponse",
@@ -377,6 +494,7 @@
"FailedResponseDetail",
"FinalResponse",
"FunctionsPageOutput",
+ "FunctionsPageRequest",
"FunctionsPageResponse",
"FunctionsPageStatusResponse",
"GenericErrorResponse",
@@ -384,37 +502,44 @@
"Gooey",
"GooeyEnvironment",
"GoogleGptPageOutput",
+ "GoogleGptPageRequest",
"GoogleGptPageRequestEmbeddingModel",
"GoogleGptPageRequestResponseFormatType",
"GoogleGptPageRequestSelectedModel",
"GoogleGptPageResponse",
"GoogleGptPageStatusResponse",
"GoogleImageGenPageOutput",
+ "GoogleImageGenPageRequest",
"GoogleImageGenPageRequestSelectedModel",
"GoogleImageGenPageResponse",
"GoogleImageGenPageStatusResponse",
"HttpValidationError",
"ImageSegmentationPageOutput",
+ "ImageSegmentationPageRequest",
"ImageSegmentationPageRequestSelectedModel",
"ImageSegmentationPageResponse",
"ImageSegmentationPageStatusResponse",
"ImageUrl",
"ImageUrlDetail",
"Img2ImgPageOutput",
+ "Img2ImgPageRequest",
"Img2ImgPageRequestSelectedControlnetModel",
"Img2ImgPageRequestSelectedControlnetModelItem",
"Img2ImgPageRequestSelectedModel",
"Img2ImgPageResponse",
"Img2ImgPageStatusResponse",
+ "InternalServerError",
"LetterWriterPageOutput",
"LetterWriterPageRequest",
"LetterWriterPageResponse",
"LetterWriterPageStatusResponse",
"LipsyncPageOutput",
+ "LipsyncPageRequest",
"LipsyncPageRequestSelectedModel",
"LipsyncPageResponse",
"LipsyncPageStatusResponse",
"LipsyncTtsPageOutput",
+ "LipsyncTtsPageRequest",
"LipsyncTtsPageRequestOpenaiTtsModel",
"LipsyncTtsPageRequestOpenaiVoiceName",
"LipsyncTtsPageRequestSelectedModel",
@@ -424,6 +549,7 @@
"LlmTools",
"MessagePart",
"ObjectInpaintingPageOutput",
+ "ObjectInpaintingPageRequest",
"ObjectInpaintingPageRequestSelectedModel",
"ObjectInpaintingPageResponse",
"ObjectInpaintingPageStatusResponse",
@@ -431,6 +557,7 @@
"PromptTreeNode",
"PromptTreeNodePrompt",
"QrCodeGeneratorPageOutput",
+ "QrCodeGeneratorPageRequest",
"QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
"QrCodeGeneratorPageRequestScheduler",
"QrCodeGeneratorPageRequestSelectedControlnetModelItem",
@@ -443,6 +570,7 @@
"RelatedDocSearchResponse",
"RelatedGoogleGptResponse",
"RelatedQnADocPageOutput",
+ "RelatedQnADocPageRequest",
"RelatedQnADocPageRequestCitationStyle",
"RelatedQnADocPageRequestEmbeddingModel",
"RelatedQnADocPageRequestKeywordQuery",
@@ -451,6 +579,7 @@
"RelatedQnADocPageResponse",
"RelatedQnADocPageStatusResponse",
"RelatedQnAPageOutput",
+ "RelatedQnAPageRequest",
"RelatedQnAPageRequestEmbeddingModel",
"RelatedQnAPageRequestResponseFormatType",
"RelatedQnAPageRequestSelectedModel",
@@ -467,6 +596,7 @@
"SadTalkerSettingsPreprocess",
"SearchReference",
"SeoSummaryPageOutput",
+ "SeoSummaryPageRequest",
"SeoSummaryPageRequestResponseFormatType",
"SeoSummaryPageRequestSelectedModel",
"SeoSummaryPageResponse",
@@ -474,20 +604,24 @@
"SerpSearchLocation",
"SerpSearchType",
"SmartGptPageOutput",
+ "SmartGptPageRequest",
"SmartGptPageRequestResponseFormatType",
"SmartGptPageRequestSelectedModel",
"SmartGptPageResponse",
"SmartGptPageStatusResponse",
"SocialLookupEmailPageOutput",
+ "SocialLookupEmailPageRequest",
"SocialLookupEmailPageRequestResponseFormatType",
"SocialLookupEmailPageRequestSelectedModel",
"SocialLookupEmailPageResponse",
"SocialLookupEmailPageStatusResponse",
"StreamError",
"Text2AudioPageOutput",
+ "Text2AudioPageRequest",
"Text2AudioPageResponse",
"Text2AudioPageStatusResponse",
"TextToSpeechPageOutput",
+ "TextToSpeechPageRequest",
"TextToSpeechPageRequestOpenaiTtsModel",
"TextToSpeechPageRequestOpenaiVoiceName",
"TextToSpeechPageRequestTtsProvider",
@@ -496,6 +630,7 @@
"TooManyRequestsError",
"TrainingDataModel",
"TranslationPageOutput",
+ "TranslationPageRequest",
"TranslationPageRequestSelectedModel",
"TranslationPageResponse",
"TranslationPageStatusResponse",
@@ -506,6 +641,7 @@
"VideoBotsPageOutput",
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
+ "VideoBotsPageRequest",
"VideoBotsPageRequestAsrModel",
"VideoBotsPageRequestCitationStyle",
"VideoBotsPageRequestEmbeddingModel",
diff --git a/src/gooey/client.py b/src/gooey/client.py
index b21e22a..fd604e2 100644
--- a/src/gooey/client.py
+++ b/src/gooey/client.py
@@ -15,6 +15,8 @@
from .core.request_options import RequestOptions
from .embeddings.client import AsyncEmbeddingsClient, EmbeddingsClient
from .environment import GooeyEnvironment
+from .errors.bad_request_error import BadRequestError
+from .errors.internal_server_error import InternalServerError
from .errors.payment_required_error import PaymentRequiredError
from .errors.too_many_requests_error import TooManyRequestsError
from .errors.unprocessable_entity_error import UnprocessableEntityError
@@ -23,103 +25,68 @@
from .lip_syncing.client import AsyncLipSyncingClient, LipSyncingClient
from .misc.client import AsyncMiscClient, MiscClient
from .smart_gpt.client import AsyncSmartGptClient, SmartGptClient
-from .types.animation_prompt import AnimationPrompt
-from .types.asr_page_request_output_format import AsrPageRequestOutputFormat
-from .types.asr_page_request_selected_model import AsrPageRequestSelectedModel
-from .types.asr_page_request_translation_model import AsrPageRequestTranslationModel
from .types.asr_page_response import AsrPageResponse
+from .types.body_async_form_art_qr_code import BodyAsyncFormArtQrCode
+from .types.body_async_form_asr import BodyAsyncFormAsr
+from .types.body_async_form_bulk_runner import BodyAsyncFormBulkRunner
+from .types.body_async_form_compare_ai_upscalers import BodyAsyncFormCompareAiUpscalers
+from .types.body_async_form_compare_llm import BodyAsyncFormCompareLlm
+from .types.body_async_form_compare_text2img import BodyAsyncFormCompareText2Img
+from .types.body_async_form_deforum_sd import BodyAsyncFormDeforumSd
+from .types.body_async_form_doc_extract import BodyAsyncFormDocExtract
+from .types.body_async_form_doc_search import BodyAsyncFormDocSearch
+from .types.body_async_form_doc_summary import BodyAsyncFormDocSummary
+from .types.body_async_form_email_face_inpainting import BodyAsyncFormEmailFaceInpainting
+from .types.body_async_form_embeddings import BodyAsyncFormEmbeddings
+from .types.body_async_form_face_inpainting import BodyAsyncFormFaceInpainting
+from .types.body_async_form_google_gpt import BodyAsyncFormGoogleGpt
+from .types.body_async_form_google_image_gen import BodyAsyncFormGoogleImageGen
+from .types.body_async_form_image_segmentation import BodyAsyncFormImageSegmentation
+from .types.body_async_form_img2img import BodyAsyncFormImg2Img
+from .types.body_async_form_lipsync_tts import BodyAsyncFormLipsyncTts
+from .types.body_async_form_object_inpainting import BodyAsyncFormObjectInpainting
+from .types.body_async_form_related_qna_maker import BodyAsyncFormRelatedQnaMaker
+from .types.body_async_form_related_qna_maker_doc import BodyAsyncFormRelatedQnaMakerDoc
+from .types.body_async_form_seo_summary import BodyAsyncFormSeoSummary
+from .types.body_async_form_social_lookup_email import BodyAsyncFormSocialLookupEmail
+from .types.body_async_form_text2audio import BodyAsyncFormText2Audio
+from .types.body_async_form_text_to_speech import BodyAsyncFormTextToSpeech
+from .types.body_async_form_translate import BodyAsyncFormTranslate
+from .types.bulk_eval_page_response import BulkEvalPageResponse
from .types.bulk_runner_page_response import BulkRunnerPageResponse
-from .types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
-from .types.compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem
+from .types.chyron_plant_page_response import ChyronPlantPageResponse
from .types.compare_llm_page_response import CompareLlmPageResponse
-from .types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler
-from .types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
from .types.compare_text2img_page_response import CompareText2ImgPageResponse
-from .types.compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
from .types.compare_upscaler_page_response import CompareUpscalerPageResponse
-from .types.deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
from .types.deforum_sd_page_response import DeforumSdPageResponse
-from .types.doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
-from .types.doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
-from .types.doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel
from .types.doc_extract_page_response import DocExtractPageResponse
-from .types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
-from .types.doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel
-from .types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
-from .types.doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType
-from .types.doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel
from .types.doc_search_page_response import DocSearchPageResponse
-from .types.doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
-from .types.doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
-from .types.doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel
from .types.doc_summary_page_response import DocSummaryPageResponse
-from .types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
from .types.email_face_inpainting_page_response import EmailFaceInpaintingPageResponse
-from .types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
from .types.embeddings_page_response import EmbeddingsPageResponse
-from .types.face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
from .types.face_inpainting_page_response import FaceInpaintingPageResponse
+from .types.failed_reponse_model_v2 import FailedReponseModelV2
+from .types.functions_page_response import FunctionsPageResponse
from .types.generic_error_response import GenericErrorResponse
-from .types.google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel
-from .types.google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType
-from .types.google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel
from .types.google_gpt_page_response import GoogleGptPageResponse
-from .types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
from .types.google_image_gen_page_response import GoogleImageGenPageResponse
from .types.http_validation_error import HttpValidationError
-from .types.image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
from .types.image_segmentation_page_response import ImageSegmentationPageResponse
-from .types.img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
-from .types.img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
from .types.img2img_page_response import Img2ImgPageResponse
-from .types.lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
-from .types.lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
-from .types.lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
-from .types.lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
+from .types.letter_writer_page_response import LetterWriterPageResponse
+from .types.lipsync_page_response import LipsyncPageResponse
from .types.lipsync_tts_page_response import LipsyncTtsPageResponse
-from .types.object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
from .types.object_inpainting_page_response import ObjectInpaintingPageResponse
-from .types.qr_code_generator_page_request_image_prompt_controlnet_models_item import (
- QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
-)
-from .types.qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler
-from .types.qr_code_generator_page_request_selected_controlnet_model_item import (
- QrCodeGeneratorPageRequestSelectedControlnetModelItem,
-)
-from .types.qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
from .types.qr_code_generator_page_response import QrCodeGeneratorPageResponse
-from .types.recipe_function import RecipeFunction
-from .types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
-from .types.related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel
-from .types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
-from .types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType
-from .types.related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel
from .types.related_qn_a_doc_page_response import RelatedQnADocPageResponse
-from .types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
-from .types.related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
-from .types.related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel
from .types.related_qn_a_page_response import RelatedQnAPageResponse
-from .types.run_settings import RunSettings
-from .types.sad_talker_settings import SadTalkerSettings
-from .types.seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType
-from .types.seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel
from .types.seo_summary_page_response import SeoSummaryPageResponse
-from .types.serp_search_location import SerpSearchLocation
-from .types.serp_search_type import SerpSearchType
-from .types.social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
-from .types.social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel
+from .types.smart_gpt_page_response import SmartGptPageResponse
from .types.social_lookup_email_page_response import SocialLookupEmailPageResponse
from .types.text2audio_page_response import Text2AudioPageResponse
-from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
-from .types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
-from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
from .types.text_to_speech_page_response import TextToSpeechPageResponse
-from .types.translation_page_request_selected_model import TranslationPageRequestSelectedModel
from .types.translation_page_response import TranslationPageResponse
-from .types.vcard import Vcard
-
-# this is used as the default value for optional parameters
-OMIT = typing.cast(typing.Any, ...)
+from .types.video_bots_page_response import VideoBotsPageResponse
class Gooey:
@@ -193,113 +160,40 @@ def __init__(
self.embeddings = EmbeddingsClient(client_wrapper=self._client_wrapper)
def animate(
- self,
- *,
- animation_prompts: typing.Sequence[AnimationPrompt],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- max_frames: typing.Optional[int] = OMIT,
- selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT,
- animation_mode: typing.Optional[str] = OMIT,
- zoom: typing.Optional[str] = OMIT,
- translation_x: typing.Optional[str] = OMIT,
- translation_y: typing.Optional[str] = OMIT,
- rotation3d_x: typing.Optional[str] = OMIT,
- rotation3d_y: typing.Optional[str] = OMIT,
- rotation3d_z: typing.Optional[str] = OMIT,
- fps: typing.Optional[int] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> DeforumSdPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormDeforumSd:
"""
Parameters
----------
- animation_prompts : typing.Sequence[AnimationPrompt]
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- max_frames : typing.Optional[int]
-
- selected_model : typing.Optional[DeforumSdPageRequestSelectedModel]
-
- animation_mode : typing.Optional[str]
-
- zoom : typing.Optional[str]
-
- translation_x : typing.Optional[str]
-
- translation_y : typing.Optional[str]
-
- rotation3d_x : typing.Optional[str]
-
- rotation3d_y : typing.Optional[str]
-
- rotation3d_z : typing.Optional[str]
-
- fps : typing.Optional[int]
-
- seed : typing.Optional[int]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- DeforumSdPageResponse
+ BodyAsyncFormDeforumSd
Successful Response
Examples
--------
- from gooey import AnimationPrompt, Gooey
+ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.animate(
- animation_prompts=[
- AnimationPrompt(
- frame="frame",
- prompt="prompt",
- )
- ],
- )
+ client.animate()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/DeforumSD/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "animation_prompts": animation_prompts,
- "max_frames": max_frames,
- "selected_model": selected_model,
- "animation_mode": animation_mode,
- "zoom": zoom,
- "translation_x": translation_x,
- "translation_y": translation_y,
- "rotation_3d_x": rotation3d_x,
- "rotation_3d_y": rotation3d_y,
- "rotation_3d_z": rotation3d_z,
- "fps": fps,
- "seed": seed,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/DeforumSD/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DeforumSdPageResponse, parse_obj_as(type_=DeforumSdPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormDeforumSd, parse_obj_as(type_=BodyAsyncFormDeforumSd, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -312,120 +206,29 @@ def animate(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def qr_code(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- qr_code_data: typing.Optional[str] = OMIT,
- qr_code_input_image: typing.Optional[str] = OMIT,
- qr_code_vcard: typing.Optional[Vcard] = OMIT,
- qr_code_file: typing.Optional[str] = OMIT,
- use_url_shortener: typing.Optional[bool] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- image_prompt: typing.Optional[str] = OMIT,
- image_prompt_controlnet_models: typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
- ] = OMIT,
- image_prompt_strength: typing.Optional[float] = OMIT,
- image_prompt_scale: typing.Optional[float] = OMIT,
- image_prompt_pos_x: typing.Optional[float] = OMIT,
- image_prompt_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
- selected_controlnet_model: typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
- ] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT,
- seed: typing.Optional[int] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> QrCodeGeneratorPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormArtQrCode:
"""
Parameters
----------
- text_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- qr_code_data : typing.Optional[str]
-
- qr_code_input_image : typing.Optional[str]
-
- qr_code_vcard : typing.Optional[Vcard]
-
- qr_code_file : typing.Optional[str]
-
- use_url_shortener : typing.Optional[bool]
-
- negative_prompt : typing.Optional[str]
-
- image_prompt : typing.Optional[str]
-
- image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]
-
- image_prompt_strength : typing.Optional[float]
-
- image_prompt_scale : typing.Optional[float]
-
- image_prompt_pos_x : typing.Optional[float]
-
- image_prompt_pos_y : typing.Optional[float]
-
- selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]
-
- selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler]
-
- seed : typing.Optional[int]
-
- obj_scale : typing.Optional[float]
-
- obj_pos_x : typing.Optional[float]
-
- obj_pos_y : typing.Optional[float]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- QrCodeGeneratorPageResponse
+ BodyAsyncFormArtQrCode
Successful Response
Examples
@@ -435,51 +238,21 @@ def qr_code(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.qr_code(
- text_prompt="text_prompt",
- )
+ client.qr_code()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/art-qr-code/async",
+ "v3/art-qr-code/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "qr_code_data": qr_code_data,
- "qr_code_input_image": qr_code_input_image,
- "qr_code_vcard": qr_code_vcard,
- "qr_code_file": qr_code_file,
- "use_url_shortener": use_url_shortener,
- "text_prompt": text_prompt,
- "negative_prompt": negative_prompt,
- "image_prompt": image_prompt,
- "image_prompt_controlnet_models": image_prompt_controlnet_models,
- "image_prompt_strength": image_prompt_strength,
- "image_prompt_scale": image_prompt_scale,
- "image_prompt_pos_x": image_prompt_pos_x,
- "image_prompt_pos_y": image_prompt_pos_y,
- "selected_model": selected_model,
- "selected_controlnet_model": selected_controlnet_model,
- "output_width": output_width,
- "output_height": output_height,
- "guidance_scale": guidance_scale,
- "controlnet_conditioning_scale": controlnet_conditioning_scale,
- "num_outputs": num_outputs,
- "quality": quality,
- "scheduler": scheduler,
- "seed": seed,
- "obj_scale": obj_scale,
- "obj_pos_x": obj_pos_x,
- "obj_pos_y": obj_pos_y,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(QrCodeGeneratorPageResponse, parse_obj_as(type_=QrCodeGeneratorPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormArtQrCode, parse_obj_as(type_=BodyAsyncFormArtQrCode, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -492,107 +265,29 @@ def qr_code(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def seo_people_also_ask(
- self,
- *,
- search_query: str,
- site_filter: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT,
- max_search_urls: typing.Optional[int] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
- scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- serp_search_type: typing.Optional[SerpSearchType] = OMIT,
- scaleserp_search_field: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> RelatedQnAPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormRelatedQnaMaker:
"""
Parameters
----------
- search_query : str
-
- site_filter : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- task_instructions : typing.Optional[str]
-
- query_instructions : typing.Optional[str]
-
- selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel]
-
- max_search_urls : typing.Optional[int]
-
- max_references : typing.Optional[int]
-
- max_context_words : typing.Optional[int]
-
- scroll_jump : typing.Optional[int]
-
- embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel]
-
- dense_weight : typing.Optional[float]
-
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType]
-
- serp_search_location : typing.Optional[SerpSearchLocation]
-
- scaleserp_locations : typing.Optional[typing.Sequence[str]]
- DEPRECATED: use `serp_search_location` instead
-
- serp_search_type : typing.Optional[SerpSearchType]
-
- scaleserp_search_field : typing.Optional[str]
- DEPRECATED: use `serp_search_type` instead
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- RelatedQnAPageResponse
+ BodyAsyncFormRelatedQnaMaker
Successful Response
Examples
@@ -602,47 +297,21 @@ def seo_people_also_ask(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.seo_people_also_ask(
- search_query="search_query",
- site_filter="site_filter",
- )
+ client.seo_people_also_ask()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/related-qna-maker/async",
+ "v3/related-qna-maker/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "search_query": search_query,
- "site_filter": site_filter,
- "task_instructions": task_instructions,
- "query_instructions": query_instructions,
- "selected_model": selected_model,
- "max_search_urls": max_search_urls,
- "max_references": max_references,
- "max_context_words": max_context_words,
- "scroll_jump": scroll_jump,
- "embedding_model": embedding_model,
- "dense_weight": dense_weight,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "serp_search_location": serp_search_location,
- "scaleserp_locations": scaleserp_locations,
- "serp_search_type": serp_search_type,
- "scaleserp_search_field": scaleserp_search_field,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(RelatedQnAPageResponse, parse_obj_as(type_=RelatedQnAPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormRelatedQnaMaker, parse_obj_as(type_=BodyAsyncFormRelatedQnaMaker, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -655,93 +324,29 @@ def seo_people_also_ask(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def seo_content(
- self,
- *,
- search_query: str,
- keywords: str,
- title: str,
- company_url: str,
- example_id: typing.Optional[str] = None,
- task_instructions: typing.Optional[str] = OMIT,
- enable_html: typing.Optional[bool] = OMIT,
- selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT,
- max_search_urls: typing.Optional[int] = OMIT,
- enable_crosslinks: typing.Optional[bool] = OMIT,
- seed: typing.Optional[int] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
- scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- serp_search_type: typing.Optional[SerpSearchType] = OMIT,
- scaleserp_search_field: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> SeoSummaryPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormSeoSummary:
"""
Parameters
----------
- search_query : str
-
- keywords : str
-
- title : str
-
- company_url : str
-
example_id : typing.Optional[str]
- task_instructions : typing.Optional[str]
-
- enable_html : typing.Optional[bool]
-
- selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel]
-
- max_search_urls : typing.Optional[int]
-
- enable_crosslinks : typing.Optional[bool]
-
- seed : typing.Optional[int]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType]
-
- serp_search_location : typing.Optional[SerpSearchLocation]
-
- scaleserp_locations : typing.Optional[typing.Sequence[str]]
- DEPRECATED: use `serp_search_location` instead
-
- serp_search_type : typing.Optional[SerpSearchType]
-
- scaleserp_search_field : typing.Optional[str]
- DEPRECATED: use `serp_search_type` instead
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- SeoSummaryPageResponse
+ BodyAsyncFormSeoSummary
Successful Response
Examples
@@ -751,46 +356,21 @@ def seo_content(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.seo_content(
- search_query="search_query",
- keywords="keywords",
- title="title",
- company_url="company_url",
- )
+ client.seo_content()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/SEOSummary/async",
+ "v3/SEOSummary/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "search_query": search_query,
- "keywords": keywords,
- "title": title,
- "company_url": company_url,
- "task_instructions": task_instructions,
- "enable_html": enable_html,
- "selected_model": selected_model,
- "max_search_urls": max_search_urls,
- "enable_crosslinks": enable_crosslinks,
- "seed": seed,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "serp_search_location": serp_search_location,
- "scaleserp_locations": scaleserp_locations,
- "serp_search_type": serp_search_type,
- "scaleserp_search_field": scaleserp_search_field,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SeoSummaryPageResponse, parse_obj_as(type_=SeoSummaryPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormSeoSummary, parse_obj_as(type_=BodyAsyncFormSeoSummary, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -803,107 +383,29 @@ def seo_content(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def web_search_llm(
- self,
- *,
- search_query: str,
- site_filter: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT,
- max_search_urls: typing.Optional[int] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
- scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- serp_search_type: typing.Optional[SerpSearchType] = OMIT,
- scaleserp_search_field: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> GoogleGptPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormGoogleGpt:
"""
Parameters
----------
- search_query : str
-
- site_filter : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- task_instructions : typing.Optional[str]
-
- query_instructions : typing.Optional[str]
-
- selected_model : typing.Optional[GoogleGptPageRequestSelectedModel]
-
- max_search_urls : typing.Optional[int]
-
- max_references : typing.Optional[int]
-
- max_context_words : typing.Optional[int]
-
- scroll_jump : typing.Optional[int]
-
- embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel]
-
- dense_weight : typing.Optional[float]
-
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType]
-
- serp_search_location : typing.Optional[SerpSearchLocation]
-
- scaleserp_locations : typing.Optional[typing.Sequence[str]]
- DEPRECATED: use `serp_search_location` instead
-
- serp_search_type : typing.Optional[SerpSearchType]
-
- scaleserp_search_field : typing.Optional[str]
- DEPRECATED: use `serp_search_type` instead
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- GoogleGptPageResponse
+ BodyAsyncFormGoogleGpt
Successful Response
Examples
@@ -913,47 +415,21 @@ def web_search_llm(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.web_search_llm(
- search_query="search_query",
- site_filter="site_filter",
- )
+ client.web_search_llm()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/google-gpt/async",
+ "v3/google-gpt/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "search_query": search_query,
- "site_filter": site_filter,
- "task_instructions": task_instructions,
- "query_instructions": query_instructions,
- "selected_model": selected_model,
- "max_search_urls": max_search_urls,
- "max_references": max_references,
- "max_context_words": max_context_words,
- "scroll_jump": scroll_jump,
- "embedding_model": embedding_model,
- "dense_weight": dense_weight,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "serp_search_location": serp_search_location,
- "scaleserp_locations": scaleserp_locations,
- "serp_search_type": serp_search_type,
- "scaleserp_search_field": scaleserp_search_field,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormGoogleGpt, parse_obj_as(type_=BodyAsyncFormGoogleGpt, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -966,65 +442,29 @@ def web_search_llm(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def personalize_email(
- self,
- *,
- email_address: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_prompt: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> SocialLookupEmailPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormSocialLookupEmail:
"""
Parameters
----------
- email_address : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- input_prompt : typing.Optional[str]
-
- selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- SocialLookupEmailPageResponse
+ BodyAsyncFormSocialLookupEmail
Successful Response
Examples
@@ -1034,34 +474,21 @@ def personalize_email(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.personalize_email(
- email_address="email_address",
- )
+ client.personalize_email()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/SocialLookupEmail/async",
+ "v3/SocialLookupEmail/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "email_address": email_address,
- "input_prompt": input_prompt,
- "selected_model": selected_model,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SocialLookupEmailPageResponse, parse_obj_as(type_=SocialLookupEmailPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormSocialLookupEmail, parse_obj_as(type_=BodyAsyncFormSocialLookupEmail, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -1074,71 +501,29 @@ def personalize_email(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def bulk_run(
- self,
- *,
- documents: typing.Sequence[str],
- run_urls: typing.Sequence[str],
- input_columns: typing.Dict[str, str],
- output_columns: typing.Dict[str, str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- eval_urls: typing.Optional[typing.Sequence[str]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> BulkRunnerPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormBulkRunner:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
- Upload or link to a CSV or google sheet that contains your sample input data.
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
- Remember to includes header names in your CSV too.
-
-
- run_urls : typing.Sequence[str]
-
- Provide one or more Gooey.AI workflow runs.
- You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
-
-
- input_columns : typing.Dict[str, str]
-
- For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
-
-
- output_columns : typing.Dict[str, str]
-
- For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
-
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- eval_urls : typing.Optional[typing.Sequence[str]]
-
- _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
-
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BulkRunnerPageResponse
+ BodyAsyncFormBulkRunner
Successful Response
Examples
@@ -1148,33 +533,21 @@ def bulk_run(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.bulk_run(
- documents=["documents"],
- run_urls=["run_urls"],
- input_columns={"key": "value"},
- output_columns={"key": "value"},
- )
+ client.bulk_run()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/bulk-runner/async",
+ "v3/bulk-runner/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "documents": documents,
- "run_urls": run_urls,
- "input_columns": input_columns,
- "output_columns": output_columns,
- "eval_urls": eval_urls,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BulkRunnerPageResponse, parse_obj_as(type_=BulkRunnerPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormBulkRunner, parse_obj_as(type_=BodyAsyncFormBulkRunner, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -1187,79 +560,29 @@ def bulk_run(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def synthesize_data(
- self,
- *,
- documents: typing.Sequence[str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- sheet_url: typing.Optional[str] = OMIT,
- selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> DocExtractPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormDocExtract:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- sheet_url : typing.Optional[str]
-
- selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel]
-
- google_translate_target : typing.Optional[str]
-
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
-
- task_instructions : typing.Optional[str]
-
- selected_model : typing.Optional[DocExtractPageRequestSelectedModel]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- DocExtractPageResponse
+ BodyAsyncFormDocExtract
Successful Response
Examples
@@ -1269,38 +592,21 @@ def synthesize_data(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.synthesize_data(
- documents=["documents"],
- )
+ client.synthesize_data()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/doc-extract/async",
+ "v3/doc-extract/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "documents": documents,
- "sheet_url": sheet_url,
- "selected_asr_model": selected_asr_model,
- "google_translate_target": google_translate_target,
- "glossary_document": glossary_document,
- "task_instructions": task_instructions,
- "selected_model": selected_model,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocExtractPageResponse, parse_obj_as(type_=DocExtractPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormDocExtract, parse_obj_as(type_=BodyAsyncFormDocExtract, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -1313,62 +619,29 @@ def synthesize_data(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def llm(
- self,
- *,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_prompt: typing.Optional[str] = OMIT,
- selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> CompareLlmPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormCompareLlm:
"""
Parameters
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- input_prompt : typing.Optional[str]
-
- selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- CompareLlmPageResponse
+ BodyAsyncFormCompareLlm
Successful Response
Examples
@@ -1381,28 +654,18 @@ def llm(
client.llm()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/CompareLLM/async",
+ "v3/CompareLLM/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_prompt": input_prompt,
- "selected_models": selected_models,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareLlmPageResponse, parse_obj_as(type_=CompareLlmPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormCompareLlm, parse_obj_as(type_=BodyAsyncFormCompareLlm, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -1415,99 +678,29 @@ def llm(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def rag(
- self,
- *,
- search_query: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- doc_extract_url: typing.Optional[str] = OMIT,
- embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT,
- citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> DocSearchPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormDocSearch:
"""
Parameters
----------
- search_query : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery]
-
- documents : typing.Optional[typing.Sequence[str]]
-
- max_references : typing.Optional[int]
-
- max_context_words : typing.Optional[int]
-
- scroll_jump : typing.Optional[int]
-
- doc_extract_url : typing.Optional[str]
-
- embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel]
-
- dense_weight : typing.Optional[float]
-
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
- task_instructions : typing.Optional[str]
-
- query_instructions : typing.Optional[str]
-
- selected_model : typing.Optional[DocSearchPageRequestSelectedModel]
-
- citation_style : typing.Optional[DocSearchPageRequestCitationStyle]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- DocSearchPageResponse
+ BodyAsyncFormDocSearch
Successful Response
Examples
@@ -1517,44 +710,21 @@ def rag(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.rag(
- search_query="search_query",
- )
+ client.rag()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/doc-search/async",
+ "v3/doc-search/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "search_query": search_query,
- "keyword_query": keyword_query,
- "documents": documents,
- "max_references": max_references,
- "max_context_words": max_context_words,
- "scroll_jump": scroll_jump,
- "doc_extract_url": doc_extract_url,
- "embedding_model": embedding_model,
- "dense_weight": dense_weight,
- "task_instructions": task_instructions,
- "query_instructions": query_instructions,
- "selected_model": selected_model,
- "citation_style": citation_style,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocSearchPageResponse, parse_obj_as(type_=DocSearchPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormDocSearch, parse_obj_as(type_=BodyAsyncFormDocSearch, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -1567,77 +737,29 @@ def rag(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def doc_summary(
- self,
- *,
- documents: typing.Sequence[str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- merge_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT,
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT,
- selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> DocSummaryPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormDocSummary:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- task_instructions : typing.Optional[str]
-
- merge_instructions : typing.Optional[str]
-
- selected_model : typing.Optional[DocSummaryPageRequestSelectedModel]
-
- chain_type : typing.Optional[typing.Literal["map_reduce"]]
-
- selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel]
-
- google_translate_target : typing.Optional[str]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- DocSummaryPageResponse
+ BodyAsyncFormDocSummary
Successful Response
Examples
@@ -1647,38 +769,21 @@ def doc_summary(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.doc_summary(
- documents=["documents"],
- )
+ client.doc_summary()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/doc-summary/async",
+ "v3/doc-summary/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "documents": documents,
- "task_instructions": task_instructions,
- "merge_instructions": merge_instructions,
- "selected_model": selected_model,
- "chain_type": chain_type,
- "selected_asr_model": selected_asr_model,
- "google_translate_target": google_translate_target,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocSummaryPageResponse, parse_obj_as(type_=DocSummaryPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormDocSummary, parse_obj_as(type_=BodyAsyncFormDocSummary, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -1691,117 +796,29 @@ def doc_summary(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def lipsync_tts(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> LipsyncTtsPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormLipsyncTts:
"""
Parameters
----------
- text_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider]
-
- uberduck_voice_name : typing.Optional[str]
-
- uberduck_speaking_rate : typing.Optional[float]
-
- google_voice_name : typing.Optional[str]
-
- google_speaking_rate : typing.Optional[float]
-
- google_pitch : typing.Optional[float]
-
- bark_history_prompt : typing.Optional[str]
-
- elevenlabs_voice_name : typing.Optional[str]
- Use `elevenlabs_voice_id` instead
-
- elevenlabs_api_key : typing.Optional[str]
-
- elevenlabs_voice_id : typing.Optional[str]
-
- elevenlabs_model : typing.Optional[str]
-
- elevenlabs_stability : typing.Optional[float]
-
- elevenlabs_similarity_boost : typing.Optional[float]
-
- elevenlabs_style : typing.Optional[float]
-
- elevenlabs_speaker_boost : typing.Optional[bool]
-
- azure_voice_name : typing.Optional[str]
-
- openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]
-
- openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]
-
- input_face : typing.Optional[str]
-
- face_padding_top : typing.Optional[int]
-
- face_padding_bottom : typing.Optional[int]
-
- face_padding_left : typing.Optional[int]
-
- face_padding_right : typing.Optional[int]
-
- sadtalker_settings : typing.Optional[SadTalkerSettings]
-
- selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- LipsyncTtsPageResponse
+ BodyAsyncFormLipsyncTts
Successful Response
Examples
@@ -1811,51 +828,21 @@ def lipsync_tts(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.lipsync_tts(
- text_prompt="text_prompt",
- )
+ client.lipsync_tts()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/LipsyncTTS/async",
+ "v3/LipsyncTTS/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "text_prompt": text_prompt,
- "tts_provider": tts_provider,
- "uberduck_voice_name": uberduck_voice_name,
- "uberduck_speaking_rate": uberduck_speaking_rate,
- "google_voice_name": google_voice_name,
- "google_speaking_rate": google_speaking_rate,
- "google_pitch": google_pitch,
- "bark_history_prompt": bark_history_prompt,
- "elevenlabs_voice_name": elevenlabs_voice_name,
- "elevenlabs_api_key": elevenlabs_api_key,
- "elevenlabs_voice_id": elevenlabs_voice_id,
- "elevenlabs_model": elevenlabs_model,
- "elevenlabs_stability": elevenlabs_stability,
- "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
- "elevenlabs_style": elevenlabs_style,
- "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
- "azure_voice_name": azure_voice_name,
- "openai_voice_name": openai_voice_name,
- "openai_tts_model": openai_tts_model,
- "input_face": input_face,
- "face_padding_top": face_padding_top,
- "face_padding_bottom": face_padding_bottom,
- "face_padding_left": face_padding_left,
- "face_padding_right": face_padding_right,
- "sadtalker_settings": sadtalker_settings,
- "selected_model": selected_model,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(LipsyncTtsPageResponse, parse_obj_as(type_=LipsyncTtsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormLipsyncTts, parse_obj_as(type_=BodyAsyncFormLipsyncTts, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -1868,96 +855,29 @@ def lipsync_tts(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def text_to_speech(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> TextToSpeechPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormTextToSpeech:
"""
Parameters
----------
- text_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider]
-
- uberduck_voice_name : typing.Optional[str]
-
- uberduck_speaking_rate : typing.Optional[float]
-
- google_voice_name : typing.Optional[str]
-
- google_speaking_rate : typing.Optional[float]
-
- google_pitch : typing.Optional[float]
-
- bark_history_prompt : typing.Optional[str]
-
- elevenlabs_voice_name : typing.Optional[str]
- Use `elevenlabs_voice_id` instead
-
- elevenlabs_api_key : typing.Optional[str]
-
- elevenlabs_voice_id : typing.Optional[str]
-
- elevenlabs_model : typing.Optional[str]
-
- elevenlabs_stability : typing.Optional[float]
-
- elevenlabs_similarity_boost : typing.Optional[float]
-
- elevenlabs_style : typing.Optional[float]
-
- elevenlabs_speaker_boost : typing.Optional[bool]
-
- azure_voice_name : typing.Optional[str]
-
- openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]
-
- openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- TextToSpeechPageResponse
+ BodyAsyncFormTextToSpeech
Successful Response
Examples
@@ -1967,44 +887,21 @@ def text_to_speech(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.text_to_speech(
- text_prompt="text_prompt",
- )
+ client.text_to_speech()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/TextToSpeech/async",
+ "v3/TextToSpeech/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "text_prompt": text_prompt,
- "tts_provider": tts_provider,
- "uberduck_voice_name": uberduck_voice_name,
- "uberduck_speaking_rate": uberduck_speaking_rate,
- "google_voice_name": google_voice_name,
- "google_speaking_rate": google_speaking_rate,
- "google_pitch": google_pitch,
- "bark_history_prompt": bark_history_prompt,
- "elevenlabs_voice_name": elevenlabs_voice_name,
- "elevenlabs_api_key": elevenlabs_api_key,
- "elevenlabs_voice_id": elevenlabs_voice_id,
- "elevenlabs_model": elevenlabs_model,
- "elevenlabs_stability": elevenlabs_stability,
- "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
- "elevenlabs_style": elevenlabs_style,
- "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
- "azure_voice_name": azure_voice_name,
- "openai_voice_name": openai_voice_name,
- "openai_tts_model": openai_tts_model,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(TextToSpeechPageResponse, parse_obj_as(type_=TextToSpeechPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormTextToSpeech, parse_obj_as(type_=BodyAsyncFormTextToSpeech, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -2017,68 +914,29 @@ def text_to_speech(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def speech_recognition(
- self,
- *,
- documents: typing.Sequence[str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT,
- language: typing.Optional[str] = OMIT,
- translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT,
- output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- translation_source: typing.Optional[str] = OMIT,
- translation_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> AsrPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormAsr:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- selected_model : typing.Optional[AsrPageRequestSelectedModel]
-
- language : typing.Optional[str]
-
- translation_model : typing.Optional[AsrPageRequestTranslationModel]
-
- output_format : typing.Optional[AsrPageRequestOutputFormat]
-
- google_translate_target : typing.Optional[str]
- use `translation_model` & `translation_target` instead.
-
- translation_source : typing.Optional[str]
-
- translation_target : typing.Optional[str]
-
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- AsrPageResponse
+ BodyAsyncFormAsr
Successful Response
Examples
@@ -2088,34 +946,18 @@ def speech_recognition(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.speech_recognition(
- documents=["documents"],
- )
+ client.speech_recognition()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/asr/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "documents": documents,
- "selected_model": selected_model,
- "language": language,
- "translation_model": translation_model,
- "output_format": output_format,
- "google_translate_target": google_translate_target,
- "translation_source": translation_source,
- "translation_target": translation_target,
- "glossary_document": glossary_document,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/asr/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(AsrPageResponse, parse_obj_as(type_=AsrPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormAsr, parse_obj_as(type_=BodyAsyncFormAsr, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -2128,65 +970,29 @@ def speech_recognition(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def text_to_music(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- duration_sec: typing.Optional[float] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- seed: typing.Optional[int] = OMIT,
- sd2upscaling: typing.Optional[bool] = OMIT,
- selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> Text2AudioPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormText2Audio:
"""
Parameters
----------
- text_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- negative_prompt : typing.Optional[str]
-
- duration_sec : typing.Optional[float]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- seed : typing.Optional[int]
-
- sd2upscaling : typing.Optional[bool]
-
- selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- Text2AudioPageResponse
+ BodyAsyncFormText2Audio
Successful Response
Examples
@@ -2196,34 +1002,21 @@ def text_to_music(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.text_to_music(
- text_prompt="text_prompt",
- )
+ client.text_to_music()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/text2audio/async",
+ "v3/text2audio/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "text_prompt": text_prompt,
- "negative_prompt": negative_prompt,
- "duration_sec": duration_sec,
- "num_outputs": num_outputs,
- "quality": quality,
- "guidance_scale": guidance_scale,
- "seed": seed,
- "sd_2_upscaling": sd2upscaling,
- "selected_models": selected_models,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(Text2AudioPageResponse, parse_obj_as(type_=Text2AudioPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormText2Audio, parse_obj_as(type_=BodyAsyncFormText2Audio, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -2236,55 +1029,29 @@ def text_to_music(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def translate(
- self,
- *,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- texts: typing.Optional[typing.Sequence[str]] = OMIT,
- selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT,
- translation_source: typing.Optional[str] = OMIT,
- translation_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> TranslationPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormTranslate:
"""
Parameters
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- texts : typing.Optional[typing.Sequence[str]]
-
- selected_model : typing.Optional[TranslationPageRequestSelectedModel]
-
- translation_source : typing.Optional[str]
-
- translation_target : typing.Optional[str]
-
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- TranslationPageResponse
+ BodyAsyncFormTranslate
Successful Response
Examples
@@ -2297,25 +1064,15 @@ def translate(
client.translate()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/translate/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "texts": texts,
- "selected_model": selected_model,
- "translation_source": translation_source,
- "translation_target": translation_target,
- "glossary_document": glossary_document,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/translate/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(TranslationPageResponse, parse_obj_as(type_=TranslationPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormTranslate, parse_obj_as(type_=BodyAsyncFormTranslate, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -2328,80 +1085,29 @@ def translate(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def remix_image(
- self,
- *,
- input_image: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- text_prompt: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT,
- selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- prompt_strength: typing.Optional[float] = OMIT,
- controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
- seed: typing.Optional[int] = OMIT,
- image_guidance_scale: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> Img2ImgPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormImg2Img:
"""
Parameters
----------
- input_image : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- text_prompt : typing.Optional[str]
-
- selected_model : typing.Optional[Img2ImgPageRequestSelectedModel]
-
- selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel]
-
- negative_prompt : typing.Optional[str]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- prompt_strength : typing.Optional[float]
-
- controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
-
- seed : typing.Optional[int]
-
- image_guidance_scale : typing.Optional[float]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- Img2ImgPageResponse
+ BodyAsyncFormImg2Img
Successful Response
Examples
@@ -2411,39 +1117,18 @@ def remix_image(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.remix_image(
- input_image="input_image",
- )
+ client.remix_image()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/Img2Img/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_image": input_image,
- "text_prompt": text_prompt,
- "selected_model": selected_model,
- "selected_controlnet_model": selected_controlnet_model,
- "negative_prompt": negative_prompt,
- "num_outputs": num_outputs,
- "quality": quality,
- "output_width": output_width,
- "output_height": output_height,
- "guidance_scale": guidance_scale,
- "prompt_strength": prompt_strength,
- "controlnet_conditioning_scale": controlnet_conditioning_scale,
- "seed": seed,
- "image_guidance_scale": image_guidance_scale,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/Img2Img/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(Img2ImgPageResponse, parse_obj_as(type_=Img2ImgPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormImg2Img, parse_obj_as(type_=BodyAsyncFormImg2Img, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -2456,83 +1141,29 @@ def remix_image(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def text_to_image(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- dall_e3quality: typing.Optional[str] = OMIT,
- dall_e3style: typing.Optional[str] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- seed: typing.Optional[int] = OMIT,
- sd2upscaling: typing.Optional[bool] = OMIT,
- selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT,
- scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT,
- edit_instruction: typing.Optional[str] = OMIT,
- image_guidance_scale: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> CompareText2ImgPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormCompareText2Img:
"""
Parameters
----------
- text_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- negative_prompt : typing.Optional[str]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- dall_e3quality : typing.Optional[str]
-
- dall_e3style : typing.Optional[str]
-
- guidance_scale : typing.Optional[float]
-
- seed : typing.Optional[int]
-
- sd2upscaling : typing.Optional[bool]
-
- selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]
-
- scheduler : typing.Optional[CompareText2ImgPageRequestScheduler]
-
- edit_instruction : typing.Optional[str]
-
- image_guidance_scale : typing.Optional[float]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- CompareText2ImgPageResponse
+ BodyAsyncFormCompareText2Img
Successful Response
Examples
@@ -2542,40 +1173,21 @@ def text_to_image(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.text_to_image(
- text_prompt="text_prompt",
- )
+ client.text_to_image()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/CompareText2Img/async",
+ "v3/CompareText2Img/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "text_prompt": text_prompt,
- "negative_prompt": negative_prompt,
- "output_width": output_width,
- "output_height": output_height,
- "num_outputs": num_outputs,
- "quality": quality,
- "dall_e_3_quality": dall_e3quality,
- "dall_e_3_style": dall_e3style,
- "guidance_scale": guidance_scale,
- "seed": seed,
- "sd_2_upscaling": sd2upscaling,
- "selected_models": selected_models,
- "scheduler": scheduler,
- "edit_instruction": edit_instruction,
- "image_guidance_scale": image_guidance_scale,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareText2ImgPageResponse, parse_obj_as(type_=CompareText2ImgPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormCompareText2Img, parse_obj_as(type_=BodyAsyncFormCompareText2Img, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -2588,83 +1200,29 @@ def text_to_image(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def product_image(
- self,
- *,
- input_image: str,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- mask_threshold: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- sd2upscaling: typing.Optional[bool] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> ObjectInpaintingPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormObjectInpainting:
"""
Parameters
----------
- input_image : str
-
- text_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- obj_scale : typing.Optional[float]
-
- obj_pos_x : typing.Optional[float]
-
- obj_pos_y : typing.Optional[float]
-
- mask_threshold : typing.Optional[float]
-
- selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel]
-
- negative_prompt : typing.Optional[str]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- sd2upscaling : typing.Optional[bool]
-
- seed : typing.Optional[int]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- ObjectInpaintingPageResponse
+ BodyAsyncFormObjectInpainting
Successful Response
Examples
@@ -2674,41 +1232,21 @@ def product_image(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.product_image(
- input_image="input_image",
- text_prompt="text_prompt",
- )
+ client.product_image()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/ObjectInpainting/async",
+ "v3/ObjectInpainting/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_image": input_image,
- "text_prompt": text_prompt,
- "obj_scale": obj_scale,
- "obj_pos_x": obj_pos_x,
- "obj_pos_y": obj_pos_y,
- "mask_threshold": mask_threshold,
- "selected_model": selected_model,
- "negative_prompt": negative_prompt,
- "num_outputs": num_outputs,
- "quality": quality,
- "output_width": output_width,
- "output_height": output_height,
- "guidance_scale": guidance_scale,
- "sd_2_upscaling": sd2upscaling,
- "seed": seed,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(ObjectInpaintingPageResponse, parse_obj_as(type_=ObjectInpaintingPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormObjectInpainting, parse_obj_as(type_=BodyAsyncFormObjectInpainting, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -2721,80 +1259,29 @@ def product_image(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def portrait(
- self,
- *,
- input_image: str,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- face_scale: typing.Optional[float] = OMIT,
- face_pos_x: typing.Optional[float] = OMIT,
- face_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- upscale_factor: typing.Optional[float] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> FaceInpaintingPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormFaceInpainting:
"""
Parameters
----------
- input_image : str
-
- text_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- face_scale : typing.Optional[float]
-
- face_pos_x : typing.Optional[float]
-
- face_pos_y : typing.Optional[float]
-
- selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel]
-
- negative_prompt : typing.Optional[str]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- upscale_factor : typing.Optional[float]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- seed : typing.Optional[int]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- FaceInpaintingPageResponse
+ BodyAsyncFormFaceInpainting
Successful Response
Examples
@@ -2804,40 +1291,21 @@ def portrait(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.portrait(
- input_image="input_image",
- text_prompt="tony stark from the iron man",
- )
+ client.portrait()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/FaceInpainting/async",
+ "v3/FaceInpainting/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_image": input_image,
- "text_prompt": text_prompt,
- "face_scale": face_scale,
- "face_pos_x": face_pos_x,
- "face_pos_y": face_pos_y,
- "selected_model": selected_model,
- "negative_prompt": negative_prompt,
- "num_outputs": num_outputs,
- "quality": quality,
- "upscale_factor": upscale_factor,
- "output_width": output_width,
- "output_height": output_height,
- "guidance_scale": guidance_scale,
- "seed": seed,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(FaceInpaintingPageResponse, parse_obj_as(type_=FaceInpaintingPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormFaceInpainting, parse_obj_as(type_=BodyAsyncFormFaceInpainting, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -2850,107 +1318,29 @@ def portrait(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def image_from_email(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- email_address: typing.Optional[str] = OMIT,
- twitter_handle: typing.Optional[str] = OMIT,
- face_scale: typing.Optional[float] = OMIT,
- face_pos_x: typing.Optional[float] = OMIT,
- face_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- upscale_factor: typing.Optional[float] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- should_send_email: typing.Optional[bool] = OMIT,
- email_from: typing.Optional[str] = OMIT,
- email_cc: typing.Optional[str] = OMIT,
- email_bcc: typing.Optional[str] = OMIT,
- email_subject: typing.Optional[str] = OMIT,
- email_body: typing.Optional[str] = OMIT,
- email_body_enable_html: typing.Optional[bool] = OMIT,
- fallback_email_body: typing.Optional[str] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> EmailFaceInpaintingPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormEmailFaceInpainting:
"""
Parameters
----------
- text_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- email_address : typing.Optional[str]
-
- twitter_handle : typing.Optional[str]
-
- face_scale : typing.Optional[float]
-
- face_pos_x : typing.Optional[float]
-
- face_pos_y : typing.Optional[float]
-
- selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]
-
- negative_prompt : typing.Optional[str]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- upscale_factor : typing.Optional[float]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- should_send_email : typing.Optional[bool]
-
- email_from : typing.Optional[str]
-
- email_cc : typing.Optional[str]
-
- email_bcc : typing.Optional[str]
-
- email_subject : typing.Optional[str]
-
- email_body : typing.Optional[str]
-
- email_body_enable_html : typing.Optional[bool]
-
- fallback_email_body : typing.Optional[str]
-
- seed : typing.Optional[int]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- EmailFaceInpaintingPageResponse
+ BodyAsyncFormEmailFaceInpainting
Successful Response
Examples
@@ -2960,49 +1350,21 @@ def image_from_email(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.image_from_email(
- email_address="sean@dara.network",
- text_prompt="winter's day in paris",
- )
+ client.image_from_email()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/EmailFaceInpainting/async",
+ "v3/EmailFaceInpainting/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "email_address": email_address,
- "twitter_handle": twitter_handle,
- "text_prompt": text_prompt,
- "face_scale": face_scale,
- "face_pos_x": face_pos_x,
- "face_pos_y": face_pos_y,
- "selected_model": selected_model,
- "negative_prompt": negative_prompt,
- "num_outputs": num_outputs,
- "quality": quality,
- "upscale_factor": upscale_factor,
- "output_width": output_width,
- "output_height": output_height,
- "guidance_scale": guidance_scale,
- "should_send_email": should_send_email,
- "email_from": email_from,
- "email_cc": email_cc,
- "email_bcc": email_bcc,
- "email_subject": email_subject,
- "email_body": email_body,
- "email_body_enable_html": email_body_enable_html,
- "fallback_email_body": fallback_email_body,
- "seed": seed,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(EmailFaceInpaintingPageResponse, parse_obj_as(type_=EmailFaceInpaintingPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormEmailFaceInpainting, parse_obj_as(type_=BodyAsyncFormEmailFaceInpainting, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -3015,78 +1377,29 @@ def image_from_email(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def image_from_web_search(
- self,
- *,
- search_query: str,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
- scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- prompt_strength: typing.Optional[float] = OMIT,
- sd2upscaling: typing.Optional[bool] = OMIT,
- seed: typing.Optional[int] = OMIT,
- image_guidance_scale: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> GoogleImageGenPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormGoogleImageGen:
"""
Parameters
----------
- search_query : str
-
- text_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- serp_search_location : typing.Optional[SerpSearchLocation]
-
- scaleserp_locations : typing.Optional[typing.Sequence[str]]
- DEPRECATED: use `serp_search_location` instead
-
- selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel]
-
- negative_prompt : typing.Optional[str]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- prompt_strength : typing.Optional[float]
-
- sd2upscaling : typing.Optional[bool]
-
- seed : typing.Optional[int]
-
- image_guidance_scale : typing.Optional[float]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- GoogleImageGenPageResponse
+ BodyAsyncFormGoogleImageGen
Successful Response
Examples
@@ -3096,39 +1409,21 @@ def image_from_web_search(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.image_from_web_search(
- search_query="search_query",
- text_prompt="text_prompt",
- )
+ client.image_from_web_search()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/GoogleImageGen/async",
+ "v3/GoogleImageGen/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "serp_search_location": serp_search_location,
- "scaleserp_locations": scaleserp_locations,
- "search_query": search_query,
- "text_prompt": text_prompt,
- "selected_model": selected_model,
- "negative_prompt": negative_prompt,
- "num_outputs": num_outputs,
- "quality": quality,
- "guidance_scale": guidance_scale,
- "prompt_strength": prompt_strength,
- "sd_2_upscaling": sd2upscaling,
- "seed": seed,
- "image_guidance_scale": image_guidance_scale,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(GoogleImageGenPageResponse, parse_obj_as(type_=GoogleImageGenPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormGoogleImageGen, parse_obj_as(type_=BodyAsyncFormGoogleImageGen, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -3141,62 +1436,29 @@ def image_from_web_search(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def remove_background(
- self,
- *,
- input_image: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT,
- mask_threshold: typing.Optional[float] = OMIT,
- rect_persepective_transform: typing.Optional[bool] = OMIT,
- reflection_opacity: typing.Optional[float] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> ImageSegmentationPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormImageSegmentation:
"""
Parameters
----------
- input_image : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel]
-
- mask_threshold : typing.Optional[float]
-
- rect_persepective_transform : typing.Optional[bool]
-
- reflection_opacity : typing.Optional[float]
-
- obj_scale : typing.Optional[float]
-
- obj_pos_x : typing.Optional[float]
-
- obj_pos_y : typing.Optional[float]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- ImageSegmentationPageResponse
+ BodyAsyncFormImageSegmentation
Successful Response
Examples
@@ -3206,33 +1468,21 @@ def remove_background(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.remove_background(
- input_image="input_image",
- )
+ client.remove_background()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/ImageSegmentation/async",
+ "v3/ImageSegmentation/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_image": input_image,
- "selected_model": selected_model,
- "mask_threshold": mask_threshold,
- "rect_persepective_transform": rect_persepective_transform,
- "reflection_opacity": reflection_opacity,
- "obj_scale": obj_scale,
- "obj_pos_x": obj_pos_x,
- "obj_pos_y": obj_pos_y,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(ImageSegmentationPageResponse, parse_obj_as(type_=ImageSegmentationPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormImageSegmentation, parse_obj_as(type_=BodyAsyncFormImageSegmentation, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -3245,56 +1495,29 @@ def remove_background(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def upscale(
- self,
- *,
- scale: int,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_image: typing.Optional[str] = OMIT,
- input_video: typing.Optional[str] = OMIT,
- selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT,
- selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> CompareUpscalerPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormCompareAiUpscalers:
"""
Parameters
----------
- scale : int
- The final upsampling scale of the image
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- input_image : typing.Optional[str]
- Input Image
-
- input_video : typing.Optional[str]
- Input Video
-
- selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]
-
- selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- CompareUpscalerPageResponse
+ BodyAsyncFormCompareAiUpscalers
Successful Response
Examples
@@ -3304,30 +1527,21 @@ def upscale(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.upscale(
- scale=1,
- )
+ client.upscale()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/compare-ai-upscalers/async",
+ "v3/compare-ai-upscalers/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_image": input_image,
- "input_video": input_video,
- "scale": scale,
- "selected_models": selected_models,
- "selected_bg_model": selected_bg_model,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareUpscalerPageResponse, parse_obj_as(type_=CompareUpscalerPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormCompareAiUpscalers, parse_obj_as(type_=BodyAsyncFormCompareAiUpscalers, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -3340,44 +1554,29 @@ def upscale(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def embed(
- self,
- *,
- texts: typing.Sequence[str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> EmbeddingsPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormEmbeddings:
"""
Parameters
----------
- texts : typing.Sequence[str]
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- EmbeddingsPageResponse
+ BodyAsyncFormEmbeddings
Successful Response
Examples
@@ -3387,27 +1586,21 @@ def embed(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.embed(
- texts=["texts"],
- )
+ client.embed()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/embeddings/async",
+ "v3/embeddings/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "texts": texts,
- "selected_model": selected_model,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(EmbeddingsPageResponse, parse_obj_as(type_=EmbeddingsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormEmbeddings, parse_obj_as(type_=BodyAsyncFormEmbeddings, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -3420,113 +1613,29 @@ def embed(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def seo_people_also_ask_doc(
- self,
- *,
- search_query: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- doc_extract_url: typing.Optional[str] = OMIT,
- embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT,
- citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
- scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- serp_search_type: typing.Optional[SerpSearchType] = OMIT,
- scaleserp_search_field: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> RelatedQnADocPageResponse:
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormRelatedQnaMakerDoc:
"""
Parameters
----------
- search_query : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery]
-
- documents : typing.Optional[typing.Sequence[str]]
-
- max_references : typing.Optional[int]
-
- max_context_words : typing.Optional[int]
-
- scroll_jump : typing.Optional[int]
-
- doc_extract_url : typing.Optional[str]
-
- embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel]
-
- dense_weight : typing.Optional[float]
-
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
- task_instructions : typing.Optional[str]
-
- query_instructions : typing.Optional[str]
-
- selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel]
-
- citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType]
-
- serp_search_location : typing.Optional[SerpSearchLocation]
-
- scaleserp_locations : typing.Optional[typing.Sequence[str]]
- DEPRECATED: use `serp_search_location` instead
-
- serp_search_type : typing.Optional[SerpSearchType]
-
- scaleserp_search_field : typing.Optional[str]
- DEPRECATED: use `serp_search_type` instead
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- RelatedQnADocPageResponse
+ BodyAsyncFormRelatedQnaMakerDoc
Successful Response
Examples
@@ -3536,48 +1645,21 @@ def seo_people_also_ask_doc(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.seo_people_also_ask_doc(
- search_query="search_query",
- )
+ client.seo_people_also_ask_doc()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/related-qna-maker-doc/async",
+ "v3/related-qna-maker-doc/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "search_query": search_query,
- "keyword_query": keyword_query,
- "documents": documents,
- "max_references": max_references,
- "max_context_words": max_context_words,
- "scroll_jump": scroll_jump,
- "doc_extract_url": doc_extract_url,
- "embedding_model": embedding_model,
- "dense_weight": dense_weight,
- "task_instructions": task_instructions,
- "query_instructions": query_instructions,
- "selected_model": selected_model,
- "citation_style": citation_style,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "serp_search_location": serp_search_location,
- "scaleserp_locations": scaleserp_locations,
- "serp_search_type": serp_search_type,
- "scaleserp_search_field": scaleserp_search_field,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(RelatedQnADocPageResponse, parse_obj_as(type_=RelatedQnADocPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormRelatedQnaMakerDoc, parse_obj_as(type_=BodyAsyncFormRelatedQnaMakerDoc, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -3590,6 +1672,10 @@ def seo_people_also_ask_doc(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -3625,147 +1711,2743 @@ def health_status_get(self, *, request_options: typing.Optional[RequestOptions]
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def post_v3chyron_plant_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> ChyronPlantPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
-class AsyncGooey:
- """
- Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions.
-
- Parameters
- ----------
- base_url : typing.Optional[str]
- The base url to use for requests from the client.
+ Returns
+ -------
+ ChyronPlantPageResponse
+ Successful Response
- environment : GooeyEnvironment
- The environment to use for requests from the client. from .environment import GooeyEnvironment
+ Examples
+ --------
+ from gooey import Gooey
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3chyron_plant_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/ChyronPlant/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(ChyronPlantPageResponse, parse_obj_as(type_=ChyronPlantPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+ def post_v3compare_llm_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareLlmPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Defaults to GooeyEnvironment.DEFAULT
+ Returns
+ -------
+ CompareLlmPageResponse
+ Successful Response
+ Examples
+ --------
+ from gooey import Gooey
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3compare_llm_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/CompareLLM/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(CompareLlmPageResponse, parse_obj_as(type_=CompareLlmPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
- api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]]
+ def post_v3compare_text2img_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareText2ImgPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CompareText2ImgPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3compare_text2img_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/CompareText2Img/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(CompareText2ImgPageResponse, parse_obj_as(type_=CompareText2ImgPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3deforum_sd_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> DeforumSdPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DeforumSdPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3deforum_sd_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/DeforumSD/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DeforumSdPageResponse, parse_obj_as(type_=DeforumSdPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3email_face_inpainting_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> EmailFaceInpaintingPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ EmailFaceInpaintingPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3email_face_inpainting_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/EmailFaceInpainting/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(EmailFaceInpaintingPageResponse, parse_obj_as(type_=EmailFaceInpaintingPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3face_inpainting_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> FaceInpaintingPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FaceInpaintingPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3face_inpainting_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/FaceInpainting/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(FaceInpaintingPageResponse, parse_obj_as(type_=FaceInpaintingPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3google_image_gen_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleImageGenPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GoogleImageGenPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3google_image_gen_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/GoogleImageGen/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(GoogleImageGenPageResponse, parse_obj_as(type_=GoogleImageGenPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3image_segmentation_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> ImageSegmentationPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ImageSegmentationPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3image_segmentation_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/ImageSegmentation/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(ImageSegmentationPageResponse, parse_obj_as(type_=ImageSegmentationPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3img2img_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> Img2ImgPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Img2ImgPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3img2img_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/Img2Img/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(Img2ImgPageResponse, parse_obj_as(type_=Img2ImgPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3letter_writer_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> LetterWriterPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LetterWriterPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3letter_writer_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/LetterWriter/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(LetterWriterPageResponse, parse_obj_as(type_=LetterWriterPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3lipsync_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> LipsyncPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LipsyncPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3lipsync_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/Lipsync/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(LipsyncPageResponse, parse_obj_as(type_=LipsyncPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3lipsync_tts_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> LipsyncTtsPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LipsyncTtsPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3lipsync_tts_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/LipsyncTTS/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(LipsyncTtsPageResponse, parse_obj_as(type_=LipsyncTtsPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3object_inpainting_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> ObjectInpaintingPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ObjectInpaintingPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3object_inpainting_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/ObjectInpainting/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(ObjectInpaintingPageResponse, parse_obj_as(type_=ObjectInpaintingPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3seo_summary_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> SeoSummaryPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SeoSummaryPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3seo_summary_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/SEOSummary/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(SeoSummaryPageResponse, parse_obj_as(type_=SeoSummaryPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3smart_gpt_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> SmartGptPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SmartGptPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3smart_gpt_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/SmartGPT/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(SmartGptPageResponse, parse_obj_as(type_=SmartGptPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3social_lookup_email_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> SocialLookupEmailPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SocialLookupEmailPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3social_lookup_email_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/SocialLookupEmail/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(SocialLookupEmailPageResponse, parse_obj_as(type_=SocialLookupEmailPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3text_to_speech_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> TextToSpeechPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ TextToSpeechPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3text_to_speech_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/TextToSpeech/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(TextToSpeechPageResponse, parse_obj_as(type_=TextToSpeechPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3art_qr_code_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> QrCodeGeneratorPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ QrCodeGeneratorPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3art_qr_code_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/art-qr-code/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(QrCodeGeneratorPageResponse, parse_obj_as(type_=QrCodeGeneratorPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> AsrPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsrPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3asr_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/asr/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(AsrPageResponse, parse_obj_as(type_=AsrPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3bulk_eval_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkEvalPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkEvalPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3bulk_eval_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/bulk-eval/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(BulkEvalPageResponse, parse_obj_as(type_=BulkEvalPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3bulk_runner_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkRunnerPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkRunnerPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3bulk_runner_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/bulk-runner/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(BulkRunnerPageResponse, parse_obj_as(type_=BulkRunnerPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3compare_ai_upscalers_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareUpscalerPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CompareUpscalerPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3compare_ai_upscalers_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/compare-ai-upscalers/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(CompareUpscalerPageResponse, parse_obj_as(type_=CompareUpscalerPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3doc_extract_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocExtractPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DocExtractPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3doc_extract_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/doc-extract/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DocExtractPageResponse, parse_obj_as(type_=DocExtractPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3doc_search_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSearchPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DocSearchPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3doc_search_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/doc-search/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DocSearchPageResponse, parse_obj_as(type_=DocSearchPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3doc_summary_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSummaryPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DocSummaryPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3doc_summary_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/doc-summary/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DocSummaryPageResponse, parse_obj_as(type_=DocSummaryPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3embeddings_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> EmbeddingsPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ EmbeddingsPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3embeddings_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/embeddings/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(EmbeddingsPageResponse, parse_obj_as(type_=EmbeddingsPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3functions_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> FunctionsPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FunctionsPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3functions_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/functions/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(FunctionsPageResponse, parse_obj_as(type_=FunctionsPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3google_gpt_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleGptPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GoogleGptPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3google_gpt_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/google-gpt/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3related_qna_maker_doc_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnADocPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ RelatedQnADocPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3related_qna_maker_doc_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/related-qna-maker-doc/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(RelatedQnADocPageResponse, parse_obj_as(type_=RelatedQnADocPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3related_qna_maker_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnAPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ RelatedQnAPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3related_qna_maker_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/related-qna-maker/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(RelatedQnAPageResponse, parse_obj_as(type_=RelatedQnAPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_v3text2audio_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> Text2AudioPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Text2AudioPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.post_v3text2audio_async()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/text2audio/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(Text2AudioPageResponse, parse_obj_as(type_=Text2AudioPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def post_v3translate_async(
+        self, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> TranslationPageResponse:
+        """
+        Issue ``POST v3/translate/async`` and parse the JSON response as TranslationPageResponse.
+
+        Parameters
+        ----------
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        TranslationPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey import Gooey
+
+        client = Gooey(
+            api_key="YOUR_API_KEY",
+        )
+        client.post_v3translate_async()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/translate/async", method="POST", request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(TranslationPageResponse, parse_obj_as(type_=TranslationPageResponse, object_=_response.json())) # type: ignore
+            # Non-2xx with a JSON body falls through to the generic ApiError below.
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def post_v3video_bots_async(
+        self, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> VideoBotsPageResponse:
+        """
+        Issue ``POST v3/video-bots/async`` and parse the JSON response as VideoBotsPageResponse.
+
+        Parameters
+        ----------
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        VideoBotsPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey import Gooey
+
+        client = Gooey(
+            api_key="YOUR_API_KEY",
+        )
+        client.post_v3video_bots_async()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/video-bots/async", method="POST", request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(VideoBotsPageResponse, parse_obj_as(type_=VideoBotsPageResponse, object_=_response.json())) # type: ignore
+            # Non-2xx with a JSON body falls through to the generic ApiError below.
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncGooey:
+ """
+ Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions.
+
+ Parameters
+ ----------
+ base_url : typing.Optional[str]
+ The base url to use for requests from the client.
+
+ environment : GooeyEnvironment
+        The environment to use for requests from the client.
+
+
+
+ Defaults to GooeyEnvironment.DEFAULT
+
+
+
+ api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]]
timeout : typing.Optional[float]
The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced.
- follow_redirects : typing.Optional[bool]
- Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in.
+ follow_redirects : typing.Optional[bool]
+ Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in.
+
+ httpx_client : typing.Optional[httpx.AsyncClient]
+ The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration.
+
+ Examples
+ --------
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+ """
+
+    def __init__(
+        self,
+        *,
+        base_url: typing.Optional[str] = None,
+        environment: GooeyEnvironment = GooeyEnvironment.DEFAULT,
+        api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"),
+        timeout: typing.Optional[float] = None,
+        follow_redirects: typing.Optional[bool] = True,
+        httpx_client: typing.Optional[httpx.AsyncClient] = None
+    ):
+        """
+        Initialize the async client and all resource sub-clients on top of a
+        single shared AsyncClientWrapper.
+
+        Raises
+        ------
+        ApiError
+            If ``api_key`` is not provided and the ``GOOEY_API_KEY``
+            environment variable is not set.
+        """
+        # Default to a 60s timeout only when we construct the httpx client
+        # ourselves; a caller-supplied client keeps its own configuration.
+        _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None
+        if api_key is None:
+            raise ApiError(body="The client must be instantiated by either passing in api_key or setting GOOEY_API_KEY")
+        self._client_wrapper = AsyncClientWrapper(
+            base_url=_get_base_url(base_url=base_url, environment=environment),
+            api_key=api_key,
+            # Prefer the caller's client; otherwise build one, forwarding
+            # follow_redirects only when it was actually specified.
+            httpx_client=httpx_client
+            if httpx_client is not None
+            else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
+            if follow_redirects is not None
+            else httpx.AsyncClient(timeout=_defaulted_timeout),
+            timeout=_defaulted_timeout,
+        )
+        self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper)
+        self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper)
+        self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper)
+        self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper)
+        self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper)
+        self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper)
+        self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper)
+        self.bulk_runner = AsyncBulkRunnerClient(client_wrapper=self._client_wrapper)
+        self.embeddings = AsyncEmbeddingsClient(client_wrapper=self._client_wrapper)
+
+    async def animate(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormDeforumSd:
+        """
+        Issue ``POST v3/DeforumSD/async/form`` and parse the JSON response as BodyAsyncFormDeforumSd.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormDeforumSd
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.animate()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/DeforumSD/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormDeforumSd, parse_obj_as(type_=BodyAsyncFormDeforumSd, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def qr_code(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormArtQrCode:
+        """
+        Issue ``POST v3/art-qr-code/async/form`` and parse the JSON response as BodyAsyncFormArtQrCode.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormArtQrCode
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.qr_code()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/art-qr-code/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormArtQrCode, parse_obj_as(type_=BodyAsyncFormArtQrCode, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def seo_people_also_ask(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormRelatedQnaMaker:
+        """
+        Issue ``POST v3/related-qna-maker/async/form`` and parse the JSON response as BodyAsyncFormRelatedQnaMaker.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormRelatedQnaMaker
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.seo_people_also_ask()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/related-qna-maker/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormRelatedQnaMaker, parse_obj_as(type_=BodyAsyncFormRelatedQnaMaker, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def seo_content(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormSeoSummary:
+        """
+        Issue ``POST v3/SEOSummary/async/form`` and parse the JSON response as BodyAsyncFormSeoSummary.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormSeoSummary
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.seo_content()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/SEOSummary/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormSeoSummary, parse_obj_as(type_=BodyAsyncFormSeoSummary, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def web_search_llm(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormGoogleGpt:
+        """
+        Issue ``POST v3/google-gpt/async/form`` and parse the JSON response as BodyAsyncFormGoogleGpt.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormGoogleGpt
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.web_search_llm()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/google-gpt/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormGoogleGpt, parse_obj_as(type_=BodyAsyncFormGoogleGpt, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def personalize_email(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormSocialLookupEmail:
+        """
+        Issue ``POST v3/SocialLookupEmail/async/form`` and parse the JSON response as BodyAsyncFormSocialLookupEmail.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormSocialLookupEmail
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.personalize_email()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/SocialLookupEmail/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormSocialLookupEmail, parse_obj_as(type_=BodyAsyncFormSocialLookupEmail, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def bulk_run(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormBulkRunner:
+        """
+        Issue ``POST v3/bulk-runner/async/form`` and parse the JSON response as BodyAsyncFormBulkRunner.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormBulkRunner
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.bulk_run()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/bulk-runner/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormBulkRunner, parse_obj_as(type_=BodyAsyncFormBulkRunner, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def synthesize_data(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormDocExtract:
+        """
+        Issue ``POST v3/doc-extract/async/form`` and parse the JSON response as BodyAsyncFormDocExtract.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormDocExtract
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.synthesize_data()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/doc-extract/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormDocExtract, parse_obj_as(type_=BodyAsyncFormDocExtract, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def llm(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormCompareLlm:
+        """
+        Issue ``POST v3/CompareLLM/async/form`` and parse the JSON response as BodyAsyncFormCompareLlm.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormCompareLlm
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.llm()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/CompareLLM/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormCompareLlm, parse_obj_as(type_=BodyAsyncFormCompareLlm, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def rag(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormDocSearch:
+        """
+        Issue ``POST v3/doc-search/async/form`` and parse the JSON response as BodyAsyncFormDocSearch.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormDocSearch
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.rag()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/doc-search/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormDocSearch, parse_obj_as(type_=BodyAsyncFormDocSearch, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def doc_summary(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormDocSummary:
+        """
+        Issue ``POST v3/doc-summary/async/form`` and parse the JSON response as BodyAsyncFormDocSummary.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormDocSummary
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.doc_summary()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/doc-summary/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormDocSummary, parse_obj_as(type_=BodyAsyncFormDocSummary, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def lipsync_tts(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormLipsyncTts:
+        """
+        Issue ``POST v3/LipsyncTTS/async/form`` and parse the JSON response as BodyAsyncFormLipsyncTts.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormLipsyncTts
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.lipsync_tts()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/LipsyncTTS/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormLipsyncTts, parse_obj_as(type_=BodyAsyncFormLipsyncTts, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def text_to_speech(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormTextToSpeech:
+        """
+        Issue ``POST v3/TextToSpeech/async/form`` and parse the JSON response as BodyAsyncFormTextToSpeech.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormTextToSpeech
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.text_to_speech()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/TextToSpeech/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormTextToSpeech, parse_obj_as(type_=BodyAsyncFormTextToSpeech, object_=_response.json())) # type: ignore
+            # Documented error statuses map to typed exceptions; any other
+            # non-2xx status falls through to the generic ApiError below.
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Response body was not valid JSON; report the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def speech_recognition(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormAsr:
+        """
+        POST to ``v3/asr/async/form`` and parse the JSON response as ``BodyAsyncFormAsr``.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+            Sent as the ``example_id`` query parameter.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormAsr
+            Successful Response
+
+        Raises
+        ------
+        BadRequestError
+            If the server responds with HTTP 400 (``GenericErrorResponse`` body).
+        PaymentRequiredError
+            If the server responds with HTTP 402.
+        UnprocessableEntityError
+            If the server responds with HTTP 422 (``HttpValidationError`` body).
+        TooManyRequestsError
+            If the server responds with HTTP 429 (``GenericErrorResponse`` body).
+        InternalServerError
+            If the server responds with HTTP 500 (``FailedReponseModelV2`` body).
+        ApiError
+            For any other non-2xx status, or when the body is not valid JSON.
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.speech_recognition()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/asr/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+        )
+        # Map known error status codes to typed exceptions; any other non-2xx
+        # status (or a non-JSON body) falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormAsr, parse_obj_as(type_=BodyAsyncFormAsr, object_=_response.json())) # type: ignore
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def text_to_music(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormText2Audio:
+        """
+        POST to ``v3/text2audio/async/form`` and parse the JSON response as ``BodyAsyncFormText2Audio``.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+            Sent as the ``example_id`` query parameter.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormText2Audio
+            Successful Response
+
+        Raises
+        ------
+        BadRequestError
+            If the server responds with HTTP 400 (``GenericErrorResponse`` body).
+        PaymentRequiredError
+            If the server responds with HTTP 402.
+        UnprocessableEntityError
+            If the server responds with HTTP 422 (``HttpValidationError`` body).
+        TooManyRequestsError
+            If the server responds with HTTP 429 (``GenericErrorResponse`` body).
+        InternalServerError
+            If the server responds with HTTP 500 (``FailedReponseModelV2`` body).
+        ApiError
+            For any other non-2xx status, or when the body is not valid JSON.
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.text_to_music()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/text2audio/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        # Map known error status codes to typed exceptions; any other non-2xx
+        # status (or a non-JSON body) falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormText2Audio, parse_obj_as(type_=BodyAsyncFormText2Audio, object_=_response.json())) # type: ignore
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def translate(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormTranslate:
+        """
+        POST to ``v3/translate/async/form`` and parse the JSON response as ``BodyAsyncFormTranslate``.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+            Sent as the ``example_id`` query parameter.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormTranslate
+            Successful Response
+
+        Raises
+        ------
+        BadRequestError
+            If the server responds with HTTP 400 (``GenericErrorResponse`` body).
+        PaymentRequiredError
+            If the server responds with HTTP 402.
+        UnprocessableEntityError
+            If the server responds with HTTP 422 (``HttpValidationError`` body).
+        TooManyRequestsError
+            If the server responds with HTTP 429 (``GenericErrorResponse`` body).
+        InternalServerError
+            If the server responds with HTTP 500 (``FailedReponseModelV2`` body).
+        ApiError
+            For any other non-2xx status, or when the body is not valid JSON.
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.translate()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/translate/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+        )
+        # Map known error status codes to typed exceptions; any other non-2xx
+        # status (or a non-JSON body) falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormTranslate, parse_obj_as(type_=BodyAsyncFormTranslate, object_=_response.json())) # type: ignore
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def remix_image(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormImg2Img:
+        """
+        POST to ``v3/Img2Img/async/form`` and parse the JSON response as ``BodyAsyncFormImg2Img``.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+            Sent as the ``example_id`` query parameter.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormImg2Img
+            Successful Response
+
+        Raises
+        ------
+        BadRequestError
+            If the server responds with HTTP 400 (``GenericErrorResponse`` body).
+        PaymentRequiredError
+            If the server responds with HTTP 402.
+        UnprocessableEntityError
+            If the server responds with HTTP 422 (``HttpValidationError`` body).
+        TooManyRequestsError
+            If the server responds with HTTP 429 (``GenericErrorResponse`` body).
+        InternalServerError
+            If the server responds with HTTP 500 (``FailedReponseModelV2`` body).
+        ApiError
+            For any other non-2xx status, or when the body is not valid JSON.
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.remix_image()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/Img2Img/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+        )
+        # Map known error status codes to typed exceptions; any other non-2xx
+        # status (or a non-JSON body) falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormImg2Img, parse_obj_as(type_=BodyAsyncFormImg2Img, object_=_response.json())) # type: ignore
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def text_to_image(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormCompareText2Img:
+        """
+        POST to ``v3/CompareText2Img/async/form`` and parse the JSON response as ``BodyAsyncFormCompareText2Img``.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+            Sent as the ``example_id`` query parameter.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormCompareText2Img
+            Successful Response
+
+        Raises
+        ------
+        BadRequestError
+            If the server responds with HTTP 400 (``GenericErrorResponse`` body).
+        PaymentRequiredError
+            If the server responds with HTTP 402.
+        UnprocessableEntityError
+            If the server responds with HTTP 422 (``HttpValidationError`` body).
+        TooManyRequestsError
+            If the server responds with HTTP 429 (``GenericErrorResponse`` body).
+        InternalServerError
+            If the server responds with HTTP 500 (``FailedReponseModelV2`` body).
+        ApiError
+            For any other non-2xx status, or when the body is not valid JSON.
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.text_to_image()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/CompareText2Img/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        # Map known error status codes to typed exceptions; any other non-2xx
+        # status (or a non-JSON body) falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormCompareText2Img, parse_obj_as(type_=BodyAsyncFormCompareText2Img, object_=_response.json())) # type: ignore
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def product_image(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormObjectInpainting:
+        """
+        POST to ``v3/ObjectInpainting/async/form`` and parse the JSON response as ``BodyAsyncFormObjectInpainting``.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+            Sent as the ``example_id`` query parameter.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormObjectInpainting
+            Successful Response
+
+        Raises
+        ------
+        BadRequestError
+            If the server responds with HTTP 400 (``GenericErrorResponse`` body).
+        PaymentRequiredError
+            If the server responds with HTTP 402.
+        UnprocessableEntityError
+            If the server responds with HTTP 422 (``HttpValidationError`` body).
+        TooManyRequestsError
+            If the server responds with HTTP 429 (``GenericErrorResponse`` body).
+        InternalServerError
+            If the server responds with HTTP 500 (``FailedReponseModelV2`` body).
+        ApiError
+            For any other non-2xx status, or when the body is not valid JSON.
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.product_image()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/ObjectInpainting/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        # Map known error status codes to typed exceptions; any other non-2xx
+        # status (or a non-JSON body) falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormObjectInpainting, parse_obj_as(type_=BodyAsyncFormObjectInpainting, object_=_response.json())) # type: ignore
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def portrait(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormFaceInpainting:
+        """
+        POST to ``v3/FaceInpainting/async/form`` and parse the JSON response as ``BodyAsyncFormFaceInpainting``.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+            Sent as the ``example_id`` query parameter.
-    httpx_client : typing.Optional[httpx.AsyncClient]
-        The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration.
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
-    Examples
-    --------
-    from gooey import AsyncGooey
+        Returns
+        -------
+        BodyAsyncFormFaceInpainting
+            Successful Response
+
+        Raises
+        ------
+        BadRequestError
+            If the server responds with HTTP 400 (``GenericErrorResponse`` body).
+        PaymentRequiredError
+            If the server responds with HTTP 402.
+        UnprocessableEntityError
+            If the server responds with HTTP 422 (``HttpValidationError`` body).
+        TooManyRequestsError
+            If the server responds with HTTP 429 (``GenericErrorResponse`` body).
+        InternalServerError
+            If the server responds with HTTP 500 (``FailedReponseModelV2`` body).
+        ApiError
+            For any other non-2xx status, or when the body is not valid JSON.
-    client = AsyncGooey(
-        api_key="YOUR_API_KEY",
-    )
-    """
+        Examples
+        --------
+        import asyncio
-    def __init__(
-        self,
-        *,
-        base_url: typing.Optional[str] = None,
-        environment: GooeyEnvironment = GooeyEnvironment.DEFAULT,
-        api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"),
-        timeout: typing.Optional[float] = None,
-        follow_redirects: typing.Optional[bool] = True,
-        httpx_client: typing.Optional[httpx.AsyncClient] = None
-    ):
-        _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None
-        if api_key is None:
-            raise ApiError(body="The client must be instantiated be either passing in api_key or setting GOOEY_API_KEY")
-        self._client_wrapper = AsyncClientWrapper(
-            base_url=_get_base_url(base_url=base_url, environment=environment),
-            api_key=api_key,
-            httpx_client=httpx_client
-            if httpx_client is not None
-            else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
-            if follow_redirects is not None
-            else httpx.AsyncClient(timeout=_defaulted_timeout),
-            timeout=_defaulted_timeout,
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
         )
-        self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper)
-        self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper)
-        self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper)
-        self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper)
-        self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper)
-        self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper)
-        self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper)
-        self.bulk_runner = AsyncBulkRunnerClient(client_wrapper=self._client_wrapper)
-        self.embeddings = AsyncEmbeddingsClient(client_wrapper=self._client_wrapper)
-    async def animate(
-        self,
-        *,
-        animation_prompts: typing.Sequence[AnimationPrompt],
-        example_id: typing.Optional[str] = None,
-        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
-        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
-        max_frames: typing.Optional[int] = OMIT,
-        selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT,
-        animation_mode: typing.Optional[str] = OMIT,
-        zoom: typing.Optional[str] = OMIT,
-        translation_x: typing.Optional[str] = OMIT,
-        translation_y: typing.Optional[str] = OMIT,
-        rotation3d_x: typing.Optional[str] = OMIT,
-        rotation3d_y: typing.Optional[str] = OMIT,
-        rotation3d_z: typing.Optional[str] = OMIT,
-        fps: typing.Optional[int] = OMIT,
-        seed: typing.Optional[int] = OMIT,
-        settings: typing.Optional[RunSettings] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None
-    ) -> DeforumSdPageResponse:
+
+        async def main() -> None:
+            await client.portrait()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/FaceInpainting/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        # Map known error status codes to typed exceptions; any other non-2xx
+        # status (or a non-JSON body) falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormFaceInpainting, parse_obj_as(type_=BodyAsyncFormFaceInpainting, object_=_response.json())) # type: ignore
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def image_from_email(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormEmailFaceInpainting:
+        """
+        POST to ``v3/EmailFaceInpainting/async/form`` and parse the JSON response as ``BodyAsyncFormEmailFaceInpainting``.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+            Sent as the ``example_id`` query parameter.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormEmailFaceInpainting
+            Successful Response
+
+        Raises
+        ------
+        BadRequestError
+            If the server responds with HTTP 400 (``GenericErrorResponse`` body).
+        PaymentRequiredError
+            If the server responds with HTTP 402.
+        UnprocessableEntityError
+            If the server responds with HTTP 422 (``HttpValidationError`` body).
+        TooManyRequestsError
+            If the server responds with HTTP 429 (``GenericErrorResponse`` body).
+        InternalServerError
+            If the server responds with HTTP 500 (``FailedReponseModelV2`` body).
+        ApiError
+            For any other non-2xx status, or when the body is not valid JSON.
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.image_from_email()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/EmailFaceInpainting/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        # Map known error status codes to typed exceptions; any other non-2xx
+        # status (or a non-JSON body) falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormEmailFaceInpainting, parse_obj_as(type_=BodyAsyncFormEmailFaceInpainting, object_=_response.json())) # type: ignore
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def image_from_web_search(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormGoogleImageGen:
+        """
+        POST to ``v3/GoogleImageGen/async/form`` and parse the JSON response as ``BodyAsyncFormGoogleImageGen``.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+            Sent as the ``example_id`` query parameter.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormGoogleImageGen
+            Successful Response
+
+        Raises
+        ------
+        BadRequestError
+            If the server responds with HTTP 400 (``GenericErrorResponse`` body).
+        PaymentRequiredError
+            If the server responds with HTTP 402.
+        UnprocessableEntityError
+            If the server responds with HTTP 422 (``HttpValidationError`` body).
+        TooManyRequestsError
+            If the server responds with HTTP 429 (``GenericErrorResponse`` body).
+        InternalServerError
+            If the server responds with HTTP 500 (``FailedReponseModelV2`` body).
+        ApiError
+            For any other non-2xx status, or when the body is not valid JSON.
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.image_from_web_search()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/GoogleImageGen/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        # Map known error status codes to typed exceptions; any other non-2xx
+        # status (or a non-JSON body) falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormGoogleImageGen, parse_obj_as(type_=BodyAsyncFormGoogleImageGen, object_=_response.json())) # type: ignore
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def remove_background(
+        self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+    ) -> BodyAsyncFormImageSegmentation:
+        """
+        POST to ``v3/ImageSegmentation/async/form`` and parse the JSON response as ``BodyAsyncFormImageSegmentation``.
+
+        Parameters
+        ----------
+        example_id : typing.Optional[str]
+            Sent as the ``example_id`` query parameter.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BodyAsyncFormImageSegmentation
+            Successful Response
+
+        Raises
+        ------
+        BadRequestError
+            If the server responds with HTTP 400 (``GenericErrorResponse`` body).
+        PaymentRequiredError
+            If the server responds with HTTP 402.
+        UnprocessableEntityError
+            If the server responds with HTTP 422 (``HttpValidationError`` body).
+        TooManyRequestsError
+            If the server responds with HTTP 429 (``GenericErrorResponse`` body).
+        InternalServerError
+            If the server responds with HTTP 500 (``FailedReponseModelV2`` body).
+        ApiError
+            For any other non-2xx status, or when the body is not valid JSON.
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AsyncGooey
+
+        client = AsyncGooey(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.remove_background()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/ImageSegmentation/async/form",
+            method="POST",
+            params={"example_id": example_id},
+            request_options=request_options,
+        )
+        # Map known error status codes to typed exceptions; any other non-2xx
+        # status (or a non-JSON body) falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(BodyAsyncFormImageSegmentation, parse_obj_as(type_=BodyAsyncFormImageSegmentation, object_=_response.json())) # type: ignore
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def upscale(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormCompareAiUpscalers:
"""
Parameters
----------
- animation_prompts : typing.Sequence[AnimationPrompt]
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- max_frames : typing.Optional[int]
-
- selected_model : typing.Optional[DeforumSdPageRequestSelectedModel]
-
- animation_mode : typing.Optional[str]
-
- zoom : typing.Optional[str]
-
- translation_x : typing.Optional[str]
-
- translation_y : typing.Optional[str]
-
- rotation3d_x : typing.Optional[str]
-
- rotation3d_y : typing.Optional[str]
-
- rotation3d_z : typing.Optional[str]
-
- fps : typing.Optional[int]
-
- seed : typing.Optional[int]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- DeforumSdPageResponse
+ BodyAsyncFormCompareAiUpscalers
Successful Response
Examples
--------
import asyncio
- from gooey import AnimationPrompt, AsyncGooey
+ from gooey import AsyncGooey
client = AsyncGooey(
api_key="YOUR_API_KEY",
@@ -3773,45 +4455,24 @@ async def animate(
async def main() -> None:
- await client.animate(
- animation_prompts=[
- AnimationPrompt(
- frame="frame",
- prompt="prompt",
- )
- ],
- )
+ await client.upscale()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/DeforumSD/async",
+ "v3/compare-ai-upscalers/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "animation_prompts": animation_prompts,
- "max_frames": max_frames,
- "selected_model": selected_model,
- "animation_mode": animation_mode,
- "zoom": zoom,
- "translation_x": translation_x,
- "translation_y": translation_y,
- "rotation_3d_x": rotation3d_x,
- "rotation_3d_y": rotation3d_y,
- "rotation_3d_z": rotation3d_z,
- "fps": fps,
- "seed": seed,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DeforumSdPageResponse, parse_obj_as(type_=DeforumSdPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormCompareAiUpscalers, parse_obj_as(type_=BodyAsyncFormCompareAiUpscalers, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -3824,120 +4485,29 @@ async def main() -> None:
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def qr_code(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- qr_code_data: typing.Optional[str] = OMIT,
- qr_code_input_image: typing.Optional[str] = OMIT,
- qr_code_vcard: typing.Optional[Vcard] = OMIT,
- qr_code_file: typing.Optional[str] = OMIT,
- use_url_shortener: typing.Optional[bool] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- image_prompt: typing.Optional[str] = OMIT,
- image_prompt_controlnet_models: typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
- ] = OMIT,
- image_prompt_strength: typing.Optional[float] = OMIT,
- image_prompt_scale: typing.Optional[float] = OMIT,
- image_prompt_pos_x: typing.Optional[float] = OMIT,
- image_prompt_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
- selected_controlnet_model: typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
- ] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT,
- seed: typing.Optional[int] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> QrCodeGeneratorPageResponse:
+ async def embed(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormEmbeddings:
"""
Parameters
----------
- text_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- qr_code_data : typing.Optional[str]
-
- qr_code_input_image : typing.Optional[str]
-
- qr_code_vcard : typing.Optional[Vcard]
-
- qr_code_file : typing.Optional[str]
-
- use_url_shortener : typing.Optional[bool]
-
- negative_prompt : typing.Optional[str]
-
- image_prompt : typing.Optional[str]
-
- image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]
-
- image_prompt_strength : typing.Optional[float]
-
- image_prompt_scale : typing.Optional[float]
-
- image_prompt_pos_x : typing.Optional[float]
-
- image_prompt_pos_y : typing.Optional[float]
-
- selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]
-
- selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler]
-
- seed : typing.Optional[int]
-
- obj_scale : typing.Optional[float]
-
- obj_pos_x : typing.Optional[float]
-
- obj_pos_y : typing.Optional[float]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- QrCodeGeneratorPageResponse
+ BodyAsyncFormEmbeddings
Successful Response
Examples
@@ -3952,54 +4522,24 @@ async def qr_code(
async def main() -> None:
- await client.qr_code(
- text_prompt="text_prompt",
- )
+ await client.embed()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/art-qr-code/async",
+ "v3/embeddings/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "qr_code_data": qr_code_data,
- "qr_code_input_image": qr_code_input_image,
- "qr_code_vcard": qr_code_vcard,
- "qr_code_file": qr_code_file,
- "use_url_shortener": use_url_shortener,
- "text_prompt": text_prompt,
- "negative_prompt": negative_prompt,
- "image_prompt": image_prompt,
- "image_prompt_controlnet_models": image_prompt_controlnet_models,
- "image_prompt_strength": image_prompt_strength,
- "image_prompt_scale": image_prompt_scale,
- "image_prompt_pos_x": image_prompt_pos_x,
- "image_prompt_pos_y": image_prompt_pos_y,
- "selected_model": selected_model,
- "selected_controlnet_model": selected_controlnet_model,
- "output_width": output_width,
- "output_height": output_height,
- "guidance_scale": guidance_scale,
- "controlnet_conditioning_scale": controlnet_conditioning_scale,
- "num_outputs": num_outputs,
- "quality": quality,
- "scheduler": scheduler,
- "seed": seed,
- "obj_scale": obj_scale,
- "obj_pos_x": obj_pos_x,
- "obj_pos_y": obj_pos_y,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(QrCodeGeneratorPageResponse, parse_obj_as(type_=QrCodeGeneratorPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormEmbeddings, parse_obj_as(type_=BodyAsyncFormEmbeddings, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -4012,107 +4552,29 @@ async def main() -> None:
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def seo_people_also_ask(
- self,
- *,
- search_query: str,
- site_filter: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT,
- max_search_urls: typing.Optional[int] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
- scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- serp_search_type: typing.Optional[SerpSearchType] = OMIT,
- scaleserp_search_field: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> RelatedQnAPageResponse:
+ async def seo_people_also_ask_doc(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormRelatedQnaMakerDoc:
"""
Parameters
----------
- search_query : str
-
- site_filter : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- task_instructions : typing.Optional[str]
-
- query_instructions : typing.Optional[str]
-
- selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel]
-
- max_search_urls : typing.Optional[int]
-
- max_references : typing.Optional[int]
-
- max_context_words : typing.Optional[int]
-
- scroll_jump : typing.Optional[int]
-
- embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel]
-
- dense_weight : typing.Optional[float]
-
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType]
-
- serp_search_location : typing.Optional[SerpSearchLocation]
-
- scaleserp_locations : typing.Optional[typing.Sequence[str]]
- DEPRECATED: use `serp_search_location` instead
-
- serp_search_type : typing.Optional[SerpSearchType]
-
- scaleserp_search_field : typing.Optional[str]
- DEPRECATED: use `serp_search_type` instead
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- RelatedQnAPageResponse
+ BodyAsyncFormRelatedQnaMakerDoc
Successful Response
Examples
@@ -4127,50 +4589,24 @@ async def seo_people_also_ask(
async def main() -> None:
- await client.seo_people_also_ask(
- search_query="search_query",
- site_filter="site_filter",
- )
+ await client.seo_people_also_ask_doc()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/related-qna-maker/async",
+ "v3/related-qna-maker-doc/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "search_query": search_query,
- "site_filter": site_filter,
- "task_instructions": task_instructions,
- "query_instructions": query_instructions,
- "selected_model": selected_model,
- "max_search_urls": max_search_urls,
- "max_references": max_references,
- "max_context_words": max_context_words,
- "scroll_jump": scroll_jump,
- "embedding_model": embedding_model,
- "dense_weight": dense_weight,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "serp_search_location": serp_search_location,
- "scaleserp_locations": scaleserp_locations,
- "serp_search_type": serp_search_type,
- "scaleserp_search_field": scaleserp_search_field,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(RelatedQnAPageResponse, parse_obj_as(type_=RelatedQnAPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormRelatedQnaMakerDoc, parse_obj_as(type_=BodyAsyncFormRelatedQnaMakerDoc, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -4183,93 +4619,67 @@ async def main() -> None:
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def seo_content(
- self,
- *,
- search_query: str,
- keywords: str,
- title: str,
- company_url: str,
- example_id: typing.Optional[str] = None,
- task_instructions: typing.Optional[str] = OMIT,
- enable_html: typing.Optional[bool] = OMIT,
- selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT,
- max_search_urls: typing.Optional[int] = OMIT,
- enable_crosslinks: typing.Optional[bool] = OMIT,
- seed: typing.Optional[int] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
- scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- serp_search_type: typing.Optional[SerpSearchType] = OMIT,
- scaleserp_search_field: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> SeoSummaryPageResponse:
+ async def health_status_get(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
- search_query : str
-
- keywords : str
-
- title : str
-
- company_url : str
-
- example_id : typing.Optional[str]
-
- task_instructions : typing.Optional[str]
-
- enable_html : typing.Optional[bool]
-
- selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel]
-
- max_search_urls : typing.Optional[int]
-
- enable_crosslinks : typing.Optional[bool]
-
- seed : typing.Optional[int]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- max_tokens : typing.Optional[int]
+ Returns
+ -------
+ typing.Any
+ Successful Response
- sampling_temperature : typing.Optional[float]
+ Examples
+ --------
+ import asyncio
- response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType]
+ from gooey import AsyncGooey
- serp_search_location : typing.Optional[SerpSearchLocation]
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
- scaleserp_locations : typing.Optional[typing.Sequence[str]]
- DEPRECATED: use `serp_search_location` instead
- serp_search_type : typing.Optional[SerpSearchType]
+ async def main() -> None:
+ await client.health_status_get()
- scaleserp_search_field : typing.Optional[str]
- DEPRECATED: use `serp_search_type` instead
- settings : typing.Optional[RunSettings]
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "status", method="GET", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def post_v3chyron_plant_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> ChyronPlantPageResponse:
+ """
+ Parameters
+ ----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- SeoSummaryPageResponse
+ ChyronPlantPageResponse
Successful Response
Examples
@@ -4284,162 +4694,160 @@ async def seo_content(
async def main() -> None:
- await client.seo_content(
- search_query="search_query",
- keywords="keywords",
- title="title",
- company_url="company_url",
- )
+ await client.post_v3chyron_plant_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/SEOSummary/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "search_query": search_query,
- "keywords": keywords,
- "title": title,
- "company_url": company_url,
- "task_instructions": task_instructions,
- "enable_html": enable_html,
- "selected_model": selected_model,
- "max_search_urls": max_search_urls,
- "enable_crosslinks": enable_crosslinks,
- "seed": seed,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "serp_search_location": serp_search_location,
- "scaleserp_locations": scaleserp_locations,
- "serp_search_type": serp_search_type,
- "scaleserp_search_field": scaleserp_search_field,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/ChyronPlant/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SeoSummaryPageResponse, parse_obj_as(type_=SeoSummaryPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(ChyronPlantPageResponse, parse_obj_as(type_=ChyronPlantPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def web_search_llm(
- self,
- *,
- search_query: str,
- site_filter: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT,
- max_search_urls: typing.Optional[int] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
- scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- serp_search_type: typing.Optional[SerpSearchType] = OMIT,
- scaleserp_search_field: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> GoogleGptPageResponse:
+ async def post_v3compare_llm_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareLlmPageResponse:
"""
Parameters
----------
- search_query : str
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- site_filter : str
+ Returns
+ -------
+ CompareLlmPageResponse
+ Successful Response
- example_id : typing.Optional[str]
+ Examples
+ --------
+ import asyncio
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ from gooey import AsyncGooey
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
- task_instructions : typing.Optional[str]
- query_instructions : typing.Optional[str]
+ async def main() -> None:
+ await client.post_v3compare_llm_async()
- selected_model : typing.Optional[GoogleGptPageRequestSelectedModel]
- max_search_urls : typing.Optional[int]
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/CompareLLM/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(CompareLlmPageResponse, parse_obj_as(type_=CompareLlmPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
- max_references : typing.Optional[int]
+ async def post_v3compare_text2img_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareText2ImgPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- max_context_words : typing.Optional[int]
+ Returns
+ -------
+ CompareText2ImgPageResponse
+ Successful Response
- scroll_jump : typing.Optional[int]
+ Examples
+ --------
+ import asyncio
- embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel]
+ from gooey import AsyncGooey
- dense_weight : typing.Optional[float]
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+ async def main() -> None:
+ await client.post_v3compare_text2img_async()
- avoid_repetition : typing.Optional[bool]
- num_outputs : typing.Optional[int]
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/CompareText2Img/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(CompareText2ImgPageResponse, parse_obj_as(type_=CompareText2ImgPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
- quality : typing.Optional[float]
+ async def post_v3deforum_sd_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> DeforumSdPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- max_tokens : typing.Optional[int]
+ Returns
+ -------
+ DeforumSdPageResponse
+ Successful Response
- sampling_temperature : typing.Optional[float]
+ Examples
+ --------
+ import asyncio
- response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType]
+ from gooey import AsyncGooey
- serp_search_location : typing.Optional[SerpSearchLocation]
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
- scaleserp_locations : typing.Optional[typing.Sequence[str]]
- DEPRECATED: use `serp_search_location` instead
- serp_search_type : typing.Optional[SerpSearchType]
+ async def main() -> None:
+ await client.post_v3deforum_sd_async()
- scaleserp_search_field : typing.Optional[str]
- DEPRECATED: use `serp_search_type` instead
- settings : typing.Optional[RunSettings]
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/DeforumSD/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DeforumSdPageResponse, parse_obj_as(type_=DeforumSdPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def post_v3email_face_inpainting_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> EmailFaceInpaintingPageResponse:
+ """
+ Parameters
+ ----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- GoogleGptPageResponse
+ EmailFaceInpaintingPageResponse
Successful Response
Examples
@@ -4454,121 +4862,34 @@ async def web_search_llm(
async def main() -> None:
- await client.web_search_llm(
- search_query="search_query",
- site_filter="site_filter",
- )
+ await client.post_v3email_face_inpainting_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/google-gpt/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "search_query": search_query,
- "site_filter": site_filter,
- "task_instructions": task_instructions,
- "query_instructions": query_instructions,
- "selected_model": selected_model,
- "max_search_urls": max_search_urls,
- "max_references": max_references,
- "max_context_words": max_context_words,
- "scroll_jump": scroll_jump,
- "embedding_model": embedding_model,
- "dense_weight": dense_weight,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "serp_search_location": serp_search_location,
- "scaleserp_locations": scaleserp_locations,
- "serp_search_type": serp_search_type,
- "scaleserp_search_field": scaleserp_search_field,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/EmailFaceInpainting/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(EmailFaceInpaintingPageResponse, parse_obj_as(type_=EmailFaceInpaintingPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def personalize_email(
- self,
- *,
- email_address: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_prompt: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> SocialLookupEmailPageResponse:
+ async def post_v3face_inpainting_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> FaceInpaintingPageResponse:
"""
Parameters
----------
- email_address : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- input_prompt : typing.Optional[str]
-
- selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- SocialLookupEmailPageResponse
+ FaceInpaintingPageResponse
Successful Response
Examples
@@ -4583,114 +4904,34 @@ async def personalize_email(
async def main() -> None:
- await client.personalize_email(
- email_address="email_address",
- )
+ await client.post_v3face_inpainting_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/SocialLookupEmail/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "email_address": email_address,
- "input_prompt": input_prompt,
- "selected_model": selected_model,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/FaceInpainting/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SocialLookupEmailPageResponse, parse_obj_as(type_=SocialLookupEmailPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(FaceInpaintingPageResponse, parse_obj_as(type_=FaceInpaintingPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def bulk_run(
- self,
- *,
- documents: typing.Sequence[str],
- run_urls: typing.Sequence[str],
- input_columns: typing.Dict[str, str],
- output_columns: typing.Dict[str, str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- eval_urls: typing.Optional[typing.Sequence[str]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> BulkRunnerPageResponse:
+ async def post_v3google_image_gen_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleImageGenPageResponse:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
- Upload or link to a CSV or google sheet that contains your sample input data.
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
- Remember to includes header names in your CSV too.
-
-
- run_urls : typing.Sequence[str]
-
- Provide one or more Gooey.AI workflow runs.
- You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
-
-
- input_columns : typing.Dict[str, str]
-
- For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
-
-
- output_columns : typing.Dict[str, str]
-
- For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
-
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- eval_urls : typing.Optional[typing.Sequence[str]]
-
- _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
-
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BulkRunnerPageResponse
+ GoogleImageGenPageResponse
Successful Response
Examples
@@ -4705,121 +4946,34 @@ async def bulk_run(
async def main() -> None:
- await client.bulk_run(
- documents=["documents"],
- run_urls=["run_urls"],
- input_columns={"key": "value"},
- output_columns={"key": "value"},
- )
+ await client.post_v3google_image_gen_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/bulk-runner/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "documents": documents,
- "run_urls": run_urls,
- "input_columns": input_columns,
- "output_columns": output_columns,
- "eval_urls": eval_urls,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/GoogleImageGen/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BulkRunnerPageResponse, parse_obj_as(type_=BulkRunnerPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(GoogleImageGenPageResponse, parse_obj_as(type_=GoogleImageGenPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def synthesize_data(
- self,
- *,
- documents: typing.Sequence[str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- sheet_url: typing.Optional[str] = OMIT,
- selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> DocExtractPageResponse:
+ async def post_v3image_segmentation_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> ImageSegmentationPageResponse:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- sheet_url : typing.Optional[str]
-
- selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel]
-
- google_translate_target : typing.Optional[str]
-
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
-
- task_instructions : typing.Optional[str]
-
- selected_model : typing.Optional[DocExtractPageRequestSelectedModel]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- DocExtractPageResponse
+ ImageSegmentationPageResponse
Successful Response
Examples
@@ -4834,109 +4988,34 @@ async def synthesize_data(
async def main() -> None:
- await client.synthesize_data(
- documents=["documents"],
- )
+ await client.post_v3image_segmentation_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/doc-extract/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "documents": documents,
- "sheet_url": sheet_url,
- "selected_asr_model": selected_asr_model,
- "google_translate_target": google_translate_target,
- "glossary_document": glossary_document,
- "task_instructions": task_instructions,
- "selected_model": selected_model,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/ImageSegmentation/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocExtractPageResponse, parse_obj_as(type_=DocExtractPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(ImageSegmentationPageResponse, parse_obj_as(type_=ImageSegmentationPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def llm(
- self,
- *,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_prompt: typing.Optional[str] = OMIT,
- selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> CompareLlmPageResponse:
+ async def post_v3img2img_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> Img2ImgPageResponse:
"""
Parameters
----------
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- input_prompt : typing.Optional[str]
-
- selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- CompareLlmPageResponse
+ Img2ImgPageResponse
Successful Response
Examples
@@ -4951,139 +5030,118 @@ async def llm(
async def main() -> None:
- await client.llm()
+ await client.post_v3img2img_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/CompareLLM/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_prompt": input_prompt,
- "selected_models": selected_models,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/Img2Img/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareLlmPageResponse, parse_obj_as(type_=CompareLlmPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(Img2ImgPageResponse, parse_obj_as(type_=Img2ImgPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def rag(
- self,
- *,
- search_query: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- doc_extract_url: typing.Optional[str] = OMIT,
- embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT,
- citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> DocSearchPageResponse:
+ async def post_v3letter_writer_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> LetterWriterPageResponse:
"""
Parameters
----------
- search_query : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery]
-
- documents : typing.Optional[typing.Sequence[str]]
-
- max_references : typing.Optional[int]
-
- max_context_words : typing.Optional[int]
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- scroll_jump : typing.Optional[int]
+ Returns
+ -------
+ LetterWriterPageResponse
+ Successful Response
- doc_extract_url : typing.Optional[str]
+ Examples
+ --------
+ import asyncio
- embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel]
+ from gooey import AsyncGooey
- dense_weight : typing.Optional[float]
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+ async def main() -> None:
+ await client.post_v3letter_writer_async()
- task_instructions : typing.Optional[str]
- query_instructions : typing.Optional[str]
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/LetterWriter/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(LetterWriterPageResponse, parse_obj_as(type_=LetterWriterPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
- selected_model : typing.Optional[DocSearchPageRequestSelectedModel]
+ async def post_v3lipsync_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> LipsyncPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- citation_style : typing.Optional[DocSearchPageRequestCitationStyle]
+ Returns
+ -------
+ LipsyncPageResponse
+ Successful Response
- avoid_repetition : typing.Optional[bool]
+ Examples
+ --------
+ import asyncio
- num_outputs : typing.Optional[int]
+ from gooey import AsyncGooey
- quality : typing.Optional[float]
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
- max_tokens : typing.Optional[int]
- sampling_temperature : typing.Optional[float]
+ async def main() -> None:
+ await client.post_v3lipsync_async()
- response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType]
- settings : typing.Optional[RunSettings]
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/Lipsync/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(LipsyncPageResponse, parse_obj_as(type_=LipsyncPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def post_v3lipsync_tts_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> LipsyncTtsPageResponse:
+ """
+ Parameters
+ ----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- DocSearchPageResponse
+ LipsyncTtsPageResponse
Successful Response
Examples
@@ -5098,130 +5156,34 @@ async def rag(
async def main() -> None:
- await client.rag(
- search_query="search_query",
- )
+ await client.post_v3lipsync_tts_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/doc-search/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "search_query": search_query,
- "keyword_query": keyword_query,
- "documents": documents,
- "max_references": max_references,
- "max_context_words": max_context_words,
- "scroll_jump": scroll_jump,
- "doc_extract_url": doc_extract_url,
- "embedding_model": embedding_model,
- "dense_weight": dense_weight,
- "task_instructions": task_instructions,
- "query_instructions": query_instructions,
- "selected_model": selected_model,
- "citation_style": citation_style,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/LipsyncTTS/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocSearchPageResponse, parse_obj_as(type_=DocSearchPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(LipsyncTtsPageResponse, parse_obj_as(type_=LipsyncTtsPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def doc_summary(
- self,
- *,
- documents: typing.Sequence[str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- merge_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT,
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT,
- selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> DocSummaryPageResponse:
+ async def post_v3object_inpainting_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> ObjectInpaintingPageResponse:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- task_instructions : typing.Optional[str]
-
- merge_instructions : typing.Optional[str]
-
- selected_model : typing.Optional[DocSummaryPageRequestSelectedModel]
-
- chain_type : typing.Optional[typing.Literal["map_reduce"]]
-
- selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel]
-
- google_translate_target : typing.Optional[str]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- DocSummaryPageResponse
+ ObjectInpaintingPageResponse
Successful Response
Examples
@@ -5236,164 +5198,118 @@ async def doc_summary(
async def main() -> None:
- await client.doc_summary(
- documents=["documents"],
- )
+ await client.post_v3object_inpainting_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/doc-summary/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "documents": documents,
- "task_instructions": task_instructions,
- "merge_instructions": merge_instructions,
- "selected_model": selected_model,
- "chain_type": chain_type,
- "selected_asr_model": selected_asr_model,
- "google_translate_target": google_translate_target,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/ObjectInpainting/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocSummaryPageResponse, parse_obj_as(type_=DocSummaryPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(ObjectInpaintingPageResponse, parse_obj_as(type_=ObjectInpaintingPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def lipsync_tts(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> LipsyncTtsPageResponse:
+ async def post_v3seo_summary_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> SeoSummaryPageResponse:
"""
Parameters
----------
- text_prompt : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider]
-
- uberduck_voice_name : typing.Optional[str]
-
- uberduck_speaking_rate : typing.Optional[float]
-
- google_voice_name : typing.Optional[str]
-
- google_speaking_rate : typing.Optional[float]
-
- google_pitch : typing.Optional[float]
-
- bark_history_prompt : typing.Optional[str]
-
- elevenlabs_voice_name : typing.Optional[str]
- Use `elevenlabs_voice_id` instead
-
- elevenlabs_api_key : typing.Optional[str]
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- elevenlabs_voice_id : typing.Optional[str]
+ Returns
+ -------
+ SeoSummaryPageResponse
+ Successful Response
- elevenlabs_model : typing.Optional[str]
+ Examples
+ --------
+ import asyncio
- elevenlabs_stability : typing.Optional[float]
+ from gooey import AsyncGooey
- elevenlabs_similarity_boost : typing.Optional[float]
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
- elevenlabs_style : typing.Optional[float]
- elevenlabs_speaker_boost : typing.Optional[bool]
+ async def main() -> None:
+ await client.post_v3seo_summary_async()
- azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/SEOSummary/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(SeoSummaryPageResponse, parse_obj_as(type_=SeoSummaryPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
- openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]
+ async def post_v3smart_gpt_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> SmartGptPageResponse:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- input_face : typing.Optional[str]
+ Returns
+ -------
+ SmartGptPageResponse
+ Successful Response
- face_padding_top : typing.Optional[int]
+ Examples
+ --------
+ import asyncio
- face_padding_bottom : typing.Optional[int]
+ from gooey import AsyncGooey
- face_padding_left : typing.Optional[int]
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
- face_padding_right : typing.Optional[int]
- sadtalker_settings : typing.Optional[SadTalkerSettings]
+ async def main() -> None:
+ await client.post_v3smart_gpt_async()
- selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel]
- settings : typing.Optional[RunSettings]
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/SmartGPT/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(SmartGptPageResponse, parse_obj_as(type_=SmartGptPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def post_v3social_lookup_email_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> SocialLookupEmailPageResponse:
+ """
+ Parameters
+ ----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- LipsyncTtsPageResponse
+ SocialLookupEmailPageResponse
Successful Response
Examples
@@ -5408,150 +5324,28 @@ async def lipsync_tts(
async def main() -> None:
- await client.lipsync_tts(
- text_prompt="text_prompt",
- )
+ await client.post_v3social_lookup_email_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/LipsyncTTS/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "text_prompt": text_prompt,
- "tts_provider": tts_provider,
- "uberduck_voice_name": uberduck_voice_name,
- "uberduck_speaking_rate": uberduck_speaking_rate,
- "google_voice_name": google_voice_name,
- "google_speaking_rate": google_speaking_rate,
- "google_pitch": google_pitch,
- "bark_history_prompt": bark_history_prompt,
- "elevenlabs_voice_name": elevenlabs_voice_name,
- "elevenlabs_api_key": elevenlabs_api_key,
- "elevenlabs_voice_id": elevenlabs_voice_id,
- "elevenlabs_model": elevenlabs_model,
- "elevenlabs_stability": elevenlabs_stability,
- "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
- "elevenlabs_style": elevenlabs_style,
- "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
- "azure_voice_name": azure_voice_name,
- "openai_voice_name": openai_voice_name,
- "openai_tts_model": openai_tts_model,
- "input_face": input_face,
- "face_padding_top": face_padding_top,
- "face_padding_bottom": face_padding_bottom,
- "face_padding_left": face_padding_left,
- "face_padding_right": face_padding_right,
- "sadtalker_settings": sadtalker_settings,
- "selected_model": selected_model,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/SocialLookupEmail/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(LipsyncTtsPageResponse, parse_obj_as(type_=LipsyncTtsPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(SocialLookupEmailPageResponse, parse_obj_as(type_=SocialLookupEmailPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def text_to_speech(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
+ async def post_v3text_to_speech_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
) -> TextToSpeechPageResponse:
"""
Parameters
----------
- text_prompt : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider]
-
- uberduck_voice_name : typing.Optional[str]
-
- uberduck_speaking_rate : typing.Optional[float]
-
- google_voice_name : typing.Optional[str]
-
- google_speaking_rate : typing.Optional[float]
-
- google_pitch : typing.Optional[float]
-
- bark_history_prompt : typing.Optional[str]
-
- elevenlabs_voice_name : typing.Optional[str]
- Use `elevenlabs_voice_id` instead
-
- elevenlabs_api_key : typing.Optional[str]
-
- elevenlabs_voice_id : typing.Optional[str]
-
- elevenlabs_model : typing.Optional[str]
-
- elevenlabs_stability : typing.Optional[float]
-
- elevenlabs_similarity_boost : typing.Optional[float]
-
- elevenlabs_style : typing.Optional[float]
-
- elevenlabs_speaker_boost : typing.Optional[bool]
-
- azure_voice_name : typing.Optional[str]
-
- openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]
-
- openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -5572,121 +5366,34 @@ async def text_to_speech(
async def main() -> None:
- await client.text_to_speech(
- text_prompt="text_prompt",
- )
+ await client.post_v3text_to_speech_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/TextToSpeech/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "text_prompt": text_prompt,
- "tts_provider": tts_provider,
- "uberduck_voice_name": uberduck_voice_name,
- "uberduck_speaking_rate": uberduck_speaking_rate,
- "google_voice_name": google_voice_name,
- "google_speaking_rate": google_speaking_rate,
- "google_pitch": google_pitch,
- "bark_history_prompt": bark_history_prompt,
- "elevenlabs_voice_name": elevenlabs_voice_name,
- "elevenlabs_api_key": elevenlabs_api_key,
- "elevenlabs_voice_id": elevenlabs_voice_id,
- "elevenlabs_model": elevenlabs_model,
- "elevenlabs_stability": elevenlabs_stability,
- "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
- "elevenlabs_style": elevenlabs_style,
- "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
- "azure_voice_name": azure_voice_name,
- "openai_voice_name": openai_voice_name,
- "openai_tts_model": openai_tts_model,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/TextToSpeech/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
return typing.cast(TextToSpeechPageResponse, parse_obj_as(type_=TextToSpeechPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def speech_recognition(
- self,
- *,
- documents: typing.Sequence[str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT,
- language: typing.Optional[str] = OMIT,
- translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT,
- output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- translation_source: typing.Optional[str] = OMIT,
- translation_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> AsrPageResponse:
+ async def post_v3art_qr_code_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> QrCodeGeneratorPageResponse:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- selected_model : typing.Optional[AsrPageRequestSelectedModel]
-
- language : typing.Optional[str]
-
- translation_model : typing.Optional[AsrPageRequestTranslationModel]
-
- output_format : typing.Optional[AsrPageRequestOutputFormat]
-
- google_translate_target : typing.Optional[str]
- use `translation_model` & `translation_target` instead.
-
- translation_source : typing.Optional[str]
-
- translation_target : typing.Optional[str]
-
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- AsrPageResponse
+ QrCodeGeneratorPageResponse
Successful Response
Examples
@@ -5701,108 +5408,32 @@ async def speech_recognition(
async def main() -> None:
- await client.speech_recognition(
- documents=["documents"],
- )
+ await client.post_v3art_qr_code_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/asr/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "documents": documents,
- "selected_model": selected_model,
- "language": language,
- "translation_model": translation_model,
- "output_format": output_format,
- "google_translate_target": google_translate_target,
- "translation_source": translation_source,
- "translation_target": translation_target,
- "glossary_document": glossary_document,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/art-qr-code/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(AsrPageResponse, parse_obj_as(type_=AsrPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(QrCodeGeneratorPageResponse, parse_obj_as(type_=QrCodeGeneratorPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def text_to_music(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- duration_sec: typing.Optional[float] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- seed: typing.Optional[int] = OMIT,
- sd2upscaling: typing.Optional[bool] = OMIT,
- selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> Text2AudioPageResponse:
+ async def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> AsrPageResponse:
"""
Parameters
----------
- text_prompt : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- negative_prompt : typing.Optional[str]
-
- duration_sec : typing.Optional[float]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- seed : typing.Optional[int]
-
- sd2upscaling : typing.Optional[bool]
-
- selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- Text2AudioPageResponse
+ AsrPageResponse
Successful Response
Examples
@@ -5817,98 +5448,34 @@ async def text_to_music(
async def main() -> None:
- await client.text_to_music(
- text_prompt="text_prompt",
- )
+ await client.post_v3asr_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/text2audio/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "text_prompt": text_prompt,
- "negative_prompt": negative_prompt,
- "duration_sec": duration_sec,
- "num_outputs": num_outputs,
- "quality": quality,
- "guidance_scale": guidance_scale,
- "seed": seed,
- "sd_2_upscaling": sd2upscaling,
- "selected_models": selected_models,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/asr/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(Text2AudioPageResponse, parse_obj_as(type_=Text2AudioPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(AsrPageResponse, parse_obj_as(type_=AsrPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def translate(
- self,
- *,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- texts: typing.Optional[typing.Sequence[str]] = OMIT,
- selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT,
- translation_source: typing.Optional[str] = OMIT,
- translation_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> TranslationPageResponse:
+ async def post_v3bulk_eval_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkEvalPageResponse:
"""
Parameters
----------
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- texts : typing.Optional[typing.Sequence[str]]
-
- selected_model : typing.Optional[TranslationPageRequestSelectedModel]
-
- translation_source : typing.Optional[str]
-
- translation_target : typing.Optional[str]
-
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- TranslationPageResponse
+ BulkEvalPageResponse
Successful Response
Examples
@@ -5923,117 +5490,34 @@ async def translate(
async def main() -> None:
- await client.translate()
+ await client.post_v3bulk_eval_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/translate/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "texts": texts,
- "selected_model": selected_model,
- "translation_source": translation_source,
- "translation_target": translation_target,
- "glossary_document": glossary_document,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/bulk-eval/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(TranslationPageResponse, parse_obj_as(type_=TranslationPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(BulkEvalPageResponse, parse_obj_as(type_=BulkEvalPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def remix_image(
- self,
- *,
- input_image: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- text_prompt: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT,
- selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- prompt_strength: typing.Optional[float] = OMIT,
- controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
- seed: typing.Optional[int] = OMIT,
- image_guidance_scale: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> Img2ImgPageResponse:
+ async def post_v3bulk_runner_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkRunnerPageResponse:
"""
Parameters
----------
- input_image : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- text_prompt : typing.Optional[str]
-
- selected_model : typing.Optional[Img2ImgPageRequestSelectedModel]
-
- selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel]
-
- negative_prompt : typing.Optional[str]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- prompt_strength : typing.Optional[float]
-
- controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
-
- seed : typing.Optional[int]
-
- image_guidance_scale : typing.Optional[float]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- Img2ImgPageResponse
+ BulkRunnerPageResponse
Successful Response
Examples
@@ -6048,131 +5532,34 @@ async def remix_image(
async def main() -> None:
- await client.remix_image(
- input_image="input_image",
- )
+ await client.post_v3bulk_runner_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/Img2Img/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_image": input_image,
- "text_prompt": text_prompt,
- "selected_model": selected_model,
- "selected_controlnet_model": selected_controlnet_model,
- "negative_prompt": negative_prompt,
- "num_outputs": num_outputs,
- "quality": quality,
- "output_width": output_width,
- "output_height": output_height,
- "guidance_scale": guidance_scale,
- "prompt_strength": prompt_strength,
- "controlnet_conditioning_scale": controlnet_conditioning_scale,
- "seed": seed,
- "image_guidance_scale": image_guidance_scale,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/bulk-runner/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(Img2ImgPageResponse, parse_obj_as(type_=Img2ImgPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(BulkRunnerPageResponse, parse_obj_as(type_=BulkRunnerPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def text_to_image(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- dall_e3quality: typing.Optional[str] = OMIT,
- dall_e3style: typing.Optional[str] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- seed: typing.Optional[int] = OMIT,
- sd2upscaling: typing.Optional[bool] = OMIT,
- selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT,
- scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT,
- edit_instruction: typing.Optional[str] = OMIT,
- image_guidance_scale: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> CompareText2ImgPageResponse:
+ async def post_v3compare_ai_upscalers_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareUpscalerPageResponse:
"""
Parameters
----------
- text_prompt : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- negative_prompt : typing.Optional[str]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- dall_e3quality : typing.Optional[str]
-
- dall_e3style : typing.Optional[str]
-
- guidance_scale : typing.Optional[float]
-
- seed : typing.Optional[int]
-
- sd2upscaling : typing.Optional[bool]
-
- selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]
-
- scheduler : typing.Optional[CompareText2ImgPageRequestScheduler]
-
- edit_instruction : typing.Optional[str]
-
- image_guidance_scale : typing.Optional[float]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- CompareText2ImgPageResponse
+ CompareUpscalerPageResponse
Successful Response
Examples
@@ -6187,132 +5574,34 @@ async def text_to_image(
async def main() -> None:
- await client.text_to_image(
- text_prompt="text_prompt",
- )
+ await client.post_v3compare_ai_upscalers_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/CompareText2Img/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "text_prompt": text_prompt,
- "negative_prompt": negative_prompt,
- "output_width": output_width,
- "output_height": output_height,
- "num_outputs": num_outputs,
- "quality": quality,
- "dall_e_3_quality": dall_e3quality,
- "dall_e_3_style": dall_e3style,
- "guidance_scale": guidance_scale,
- "seed": seed,
- "sd_2_upscaling": sd2upscaling,
- "selected_models": selected_models,
- "scheduler": scheduler,
- "edit_instruction": edit_instruction,
- "image_guidance_scale": image_guidance_scale,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/compare-ai-upscalers/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareText2ImgPageResponse, parse_obj_as(type_=CompareText2ImgPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(CompareUpscalerPageResponse, parse_obj_as(type_=CompareUpscalerPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def product_image(
- self,
- *,
- input_image: str,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- mask_threshold: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- sd2upscaling: typing.Optional[bool] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> ObjectInpaintingPageResponse:
+ async def post_v3doc_extract_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocExtractPageResponse:
"""
Parameters
----------
- input_image : str
-
- text_prompt : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- obj_scale : typing.Optional[float]
-
- obj_pos_x : typing.Optional[float]
-
- obj_pos_y : typing.Optional[float]
-
- mask_threshold : typing.Optional[float]
-
- selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel]
-
- negative_prompt : typing.Optional[str]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- sd2upscaling : typing.Optional[bool]
-
- seed : typing.Optional[int]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- ObjectInpaintingPageResponse
+ DocExtractPageResponse
Successful Response
Examples
@@ -6327,130 +5616,34 @@ async def product_image(
async def main() -> None:
- await client.product_image(
- input_image="input_image",
- text_prompt="text_prompt",
- )
+ await client.post_v3doc_extract_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/ObjectInpainting/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_image": input_image,
- "text_prompt": text_prompt,
- "obj_scale": obj_scale,
- "obj_pos_x": obj_pos_x,
- "obj_pos_y": obj_pos_y,
- "mask_threshold": mask_threshold,
- "selected_model": selected_model,
- "negative_prompt": negative_prompt,
- "num_outputs": num_outputs,
- "quality": quality,
- "output_width": output_width,
- "output_height": output_height,
- "guidance_scale": guidance_scale,
- "sd_2_upscaling": sd2upscaling,
- "seed": seed,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/doc-extract/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(ObjectInpaintingPageResponse, parse_obj_as(type_=ObjectInpaintingPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(DocExtractPageResponse, parse_obj_as(type_=DocExtractPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def portrait(
- self,
- *,
- input_image: str,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- face_scale: typing.Optional[float] = OMIT,
- face_pos_x: typing.Optional[float] = OMIT,
- face_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- upscale_factor: typing.Optional[float] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> FaceInpaintingPageResponse:
+ async def post_v3doc_search_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSearchPageResponse:
"""
Parameters
----------
- input_image : str
-
- text_prompt : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- face_scale : typing.Optional[float]
-
- face_pos_x : typing.Optional[float]
-
- face_pos_y : typing.Optional[float]
-
- selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel]
-
- negative_prompt : typing.Optional[str]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- upscale_factor : typing.Optional[float]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- seed : typing.Optional[int]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- FaceInpaintingPageResponse
+ DocSearchPageResponse
Successful Response
Examples
@@ -6465,156 +5658,76 @@ async def portrait(
async def main() -> None:
- await client.portrait(
- input_image="input_image",
- text_prompt="tony stark from the iron man",
- )
+ await client.post_v3doc_search_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/FaceInpainting/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_image": input_image,
- "text_prompt": text_prompt,
- "face_scale": face_scale,
- "face_pos_x": face_pos_x,
- "face_pos_y": face_pos_y,
- "selected_model": selected_model,
- "negative_prompt": negative_prompt,
- "num_outputs": num_outputs,
- "quality": quality,
- "upscale_factor": upscale_factor,
- "output_width": output_width,
- "output_height": output_height,
- "guidance_scale": guidance_scale,
- "seed": seed,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/doc-search/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(FaceInpaintingPageResponse, parse_obj_as(type_=FaceInpaintingPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(DocSearchPageResponse, parse_obj_as(type_=DocSearchPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def image_from_email(
- self,
- *,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- email_address: typing.Optional[str] = OMIT,
- twitter_handle: typing.Optional[str] = OMIT,
- face_scale: typing.Optional[float] = OMIT,
- face_pos_x: typing.Optional[float] = OMIT,
- face_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- upscale_factor: typing.Optional[float] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- should_send_email: typing.Optional[bool] = OMIT,
- email_from: typing.Optional[str] = OMIT,
- email_cc: typing.Optional[str] = OMIT,
- email_bcc: typing.Optional[str] = OMIT,
- email_subject: typing.Optional[str] = OMIT,
- email_body: typing.Optional[str] = OMIT,
- email_body_enable_html: typing.Optional[bool] = OMIT,
- fallback_email_body: typing.Optional[str] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> EmailFaceInpaintingPageResponse:
+ async def post_v3doc_summary_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSummaryPageResponse:
"""
Parameters
----------
- text_prompt : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- email_address : typing.Optional[str]
-
- twitter_handle : typing.Optional[str]
-
- face_scale : typing.Optional[float]
-
- face_pos_x : typing.Optional[float]
-
- face_pos_y : typing.Optional[float]
-
- selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]
-
- negative_prompt : typing.Optional[str]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- upscale_factor : typing.Optional[float]
-
- output_width : typing.Optional[int]
-
- output_height : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- should_send_email : typing.Optional[bool]
-
- email_from : typing.Optional[str]
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- email_cc : typing.Optional[str]
+ Returns
+ -------
+ DocSummaryPageResponse
+ Successful Response
- email_bcc : typing.Optional[str]
+ Examples
+ --------
+ import asyncio
- email_subject : typing.Optional[str]
+ from gooey import AsyncGooey
- email_body : typing.Optional[str]
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
- email_body_enable_html : typing.Optional[bool]
- fallback_email_body : typing.Optional[str]
+ async def main() -> None:
+ await client.post_v3doc_summary_async()
- seed : typing.Optional[int]
- settings : typing.Optional[RunSettings]
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/doc-summary/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DocSummaryPageResponse, parse_obj_as(type_=DocSummaryPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def post_v3embeddings_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> EmbeddingsPageResponse:
+ """
+ Parameters
+ ----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- EmailFaceInpaintingPageResponse
+ EmbeddingsPageResponse
Successful Response
Examples
@@ -6629,136 +5742,34 @@ async def image_from_email(
async def main() -> None:
- await client.image_from_email(
- email_address="sean@dara.network",
- text_prompt="winter's day in paris",
- )
+ await client.post_v3embeddings_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/EmailFaceInpainting/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "email_address": email_address,
- "twitter_handle": twitter_handle,
- "text_prompt": text_prompt,
- "face_scale": face_scale,
- "face_pos_x": face_pos_x,
- "face_pos_y": face_pos_y,
- "selected_model": selected_model,
- "negative_prompt": negative_prompt,
- "num_outputs": num_outputs,
- "quality": quality,
- "upscale_factor": upscale_factor,
- "output_width": output_width,
- "output_height": output_height,
- "guidance_scale": guidance_scale,
- "should_send_email": should_send_email,
- "email_from": email_from,
- "email_cc": email_cc,
- "email_bcc": email_bcc,
- "email_subject": email_subject,
- "email_body": email_body,
- "email_body_enable_html": email_body_enable_html,
- "fallback_email_body": fallback_email_body,
- "seed": seed,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/embeddings/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(EmailFaceInpaintingPageResponse, parse_obj_as(type_=EmailFaceInpaintingPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(EmbeddingsPageResponse, parse_obj_as(type_=EmbeddingsPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def image_from_web_search(
- self,
- *,
- search_query: str,
- text_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
- scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- prompt_strength: typing.Optional[float] = OMIT,
- sd2upscaling: typing.Optional[bool] = OMIT,
- seed: typing.Optional[int] = OMIT,
- image_guidance_scale: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> GoogleImageGenPageResponse:
+ async def post_v3functions_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> FunctionsPageResponse:
"""
Parameters
----------
- search_query : str
-
- text_prompt : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- serp_search_location : typing.Optional[SerpSearchLocation]
-
- scaleserp_locations : typing.Optional[typing.Sequence[str]]
- DEPRECATED: use `serp_search_location` instead
-
- selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel]
-
- negative_prompt : typing.Optional[str]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[int]
-
- guidance_scale : typing.Optional[float]
-
- prompt_strength : typing.Optional[float]
-
- sd2upscaling : typing.Optional[bool]
-
- seed : typing.Optional[int]
-
- image_guidance_scale : typing.Optional[float]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- GoogleImageGenPageResponse
+ FunctionsPageResponse
Successful Response
Examples
@@ -6773,110 +5784,34 @@ async def image_from_web_search(
async def main() -> None:
- await client.image_from_web_search(
- search_query="search_query",
- text_prompt="text_prompt",
- )
+ await client.post_v3functions_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/GoogleImageGen/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "serp_search_location": serp_search_location,
- "scaleserp_locations": scaleserp_locations,
- "search_query": search_query,
- "text_prompt": text_prompt,
- "selected_model": selected_model,
- "negative_prompt": negative_prompt,
- "num_outputs": num_outputs,
- "quality": quality,
- "guidance_scale": guidance_scale,
- "prompt_strength": prompt_strength,
- "sd_2_upscaling": sd2upscaling,
- "seed": seed,
- "image_guidance_scale": image_guidance_scale,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/functions/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(GoogleImageGenPageResponse, parse_obj_as(type_=GoogleImageGenPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(FunctionsPageResponse, parse_obj_as(type_=FunctionsPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def remove_background(
- self,
- *,
- input_image: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT,
- mask_threshold: typing.Optional[float] = OMIT,
- rect_persepective_transform: typing.Optional[bool] = OMIT,
- reflection_opacity: typing.Optional[float] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> ImageSegmentationPageResponse:
+ async def post_v3google_gpt_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleGptPageResponse:
"""
Parameters
----------
- input_image : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel]
-
- mask_threshold : typing.Optional[float]
-
- rect_persepective_transform : typing.Optional[bool]
-
- reflection_opacity : typing.Optional[float]
-
- obj_scale : typing.Optional[float]
-
- obj_pos_x : typing.Optional[float]
-
- obj_pos_y : typing.Optional[float]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- ImageSegmentationPageResponse
+ GoogleGptPageResponse
Successful Response
Examples
@@ -6891,98 +5826,34 @@ async def remove_background(
async def main() -> None:
- await client.remove_background(
- input_image="input_image",
- )
+ await client.post_v3google_gpt_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/ImageSegmentation/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_image": input_image,
- "selected_model": selected_model,
- "mask_threshold": mask_threshold,
- "rect_persepective_transform": rect_persepective_transform,
- "reflection_opacity": reflection_opacity,
- "obj_scale": obj_scale,
- "obj_pos_x": obj_pos_x,
- "obj_pos_y": obj_pos_y,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/google-gpt/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(ImageSegmentationPageResponse, parse_obj_as(type_=ImageSegmentationPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def upscale(
- self,
- *,
- scale: int,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_image: typing.Optional[str] = OMIT,
- input_video: typing.Optional[str] = OMIT,
- selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT,
- selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> CompareUpscalerPageResponse:
+ async def post_v3related_qna_maker_doc_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnADocPageResponse:
"""
Parameters
----------
- scale : int
- The final upsampling scale of the image
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- input_image : typing.Optional[str]
- Input Image
-
- input_video : typing.Optional[str]
- Input Video
-
- selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]
-
- selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- CompareUpscalerPageResponse
+ RelatedQnADocPageResponse
Successful Response
Examples
@@ -6997,83 +5868,34 @@ async def upscale(
async def main() -> None:
- await client.upscale(
- scale=1,
- )
+ await client.post_v3related_qna_maker_doc_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/compare-ai-upscalers/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_image": input_image,
- "input_video": input_video,
- "scale": scale,
- "selected_models": selected_models,
- "selected_bg_model": selected_bg_model,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/related-qna-maker-doc/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareUpscalerPageResponse, parse_obj_as(type_=CompareUpscalerPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(RelatedQnADocPageResponse, parse_obj_as(type_=RelatedQnADocPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def embed(
- self,
- *,
- texts: typing.Sequence[str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> EmbeddingsPageResponse:
+ async def post_v3related_qna_maker_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnAPageResponse:
"""
Parameters
----------
- texts : typing.Sequence[str]
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- EmbeddingsPageResponse
+ RelatedQnAPageResponse
Successful Response
Examples
@@ -7088,149 +5910,76 @@ async def embed(
async def main() -> None:
- await client.embed(
- texts=["texts"],
- )
+ await client.post_v3related_qna_maker_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/embeddings/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "texts": texts,
- "selected_model": selected_model,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/related-qna-maker/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(EmbeddingsPageResponse, parse_obj_as(type_=EmbeddingsPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(RelatedQnAPageResponse, parse_obj_as(type_=RelatedQnAPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def seo_people_also_ask_doc(
- self,
- *,
- search_query: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- doc_extract_url: typing.Optional[str] = OMIT,
- embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT,
- citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
- scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- serp_search_type: typing.Optional[SerpSearchType] = OMIT,
- scaleserp_search_field: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> RelatedQnADocPageResponse:
+ async def post_v3text2audio_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> Text2AudioPageResponse:
"""
Parameters
----------
- search_query : str
-
- example_id : typing.Optional[str]
-
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery]
-
- documents : typing.Optional[typing.Sequence[str]]
-
- max_references : typing.Optional[int]
-
- max_context_words : typing.Optional[int]
-
- scroll_jump : typing.Optional[int]
-
- doc_extract_url : typing.Optional[str]
-
- embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel]
-
- dense_weight : typing.Optional[float]
-
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
- task_instructions : typing.Optional[str]
-
- query_instructions : typing.Optional[str]
-
- selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel]
-
- citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- max_tokens : typing.Optional[int]
+ Returns
+ -------
+ Text2AudioPageResponse
+ Successful Response
- sampling_temperature : typing.Optional[float]
+ Examples
+ --------
+ import asyncio
- response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType]
+ from gooey import AsyncGooey
- serp_search_location : typing.Optional[SerpSearchLocation]
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
- scaleserp_locations : typing.Optional[typing.Sequence[str]]
- DEPRECATED: use `serp_search_location` instead
- serp_search_type : typing.Optional[SerpSearchType]
+ async def main() -> None:
+ await client.post_v3text2audio_async()
- scaleserp_search_field : typing.Optional[str]
- DEPRECATED: use `serp_search_type` instead
- settings : typing.Optional[RunSettings]
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/text2audio/async", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(Text2AudioPageResponse, parse_obj_as(type_=Text2AudioPageResponse, object_=_response.json())) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def post_v3translate_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> TranslationPageResponse:
+ """
+ Parameters
+ ----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- RelatedQnADocPageResponse
+ TranslationPageResponse
Successful Response
Examples
@@ -7245,69 +5994,25 @@ async def seo_people_also_ask_doc(
async def main() -> None:
- await client.seo_people_also_ask_doc(
- search_query="search_query",
- )
+ await client.post_v3translate_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/related-qna-maker-doc/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "search_query": search_query,
- "keyword_query": keyword_query,
- "documents": documents,
- "max_references": max_references,
- "max_context_words": max_context_words,
- "scroll_jump": scroll_jump,
- "doc_extract_url": doc_extract_url,
- "embedding_model": embedding_model,
- "dense_weight": dense_weight,
- "task_instructions": task_instructions,
- "query_instructions": query_instructions,
- "selected_model": selected_model,
- "citation_style": citation_style,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "serp_search_location": serp_search_location,
- "scaleserp_locations": scaleserp_locations,
- "serp_search_type": serp_search_type,
- "scaleserp_search_field": scaleserp_search_field,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/translate/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(RelatedQnADocPageResponse, parse_obj_as(type_=RelatedQnADocPageResponse, object_=_response.json())) # type: ignore
- if _response.status_code == 402:
- raise PaymentRequiredError(
- typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
- )
+ return typing.cast(TranslationPageResponse, parse_obj_as(type_=TranslationPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def health_status_get(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
+ async def post_v3video_bots_async(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> VideoBotsPageResponse:
"""
Parameters
----------
@@ -7316,7 +6021,7 @@ async def health_status_get(self, *, request_options: typing.Optional[RequestOpt
Returns
-------
- typing.Any
+ VideoBotsPageResponse
Successful Response
Examples
@@ -7331,17 +6036,17 @@ async def health_status_get(self, *, request_options: typing.Optional[RequestOpt
async def main() -> None:
- await client.health_status_get()
+ await client.post_v3video_bots_async()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "status", method="GET", request_options=request_options
+ "v3/video-bots/async", method="POST", request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ return typing.cast(VideoBotsPageResponse, parse_obj_as(type_=VideoBotsPageResponse, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/copilot_for_your_enterprise/__init__.py b/src/gooey/copilot_for_your_enterprise/__init__.py
index f1637db..f3ea265 100644
--- a/src/gooey/copilot_for_your_enterprise/__init__.py
+++ b/src/gooey/copilot_for_your_enterprise/__init__.py
@@ -1,27 +1,2 @@
# This file was auto-generated by Fern from our API Definition.
-from .types import (
- VideoBotsPageRequestAsrModel,
- VideoBotsPageRequestCitationStyle,
- VideoBotsPageRequestEmbeddingModel,
- VideoBotsPageRequestLipsyncModel,
- VideoBotsPageRequestOpenaiTtsModel,
- VideoBotsPageRequestOpenaiVoiceName,
- VideoBotsPageRequestResponseFormatType,
- VideoBotsPageRequestSelectedModel,
- VideoBotsPageRequestTranslationModel,
- VideoBotsPageRequestTtsProvider,
-)
-
-__all__ = [
- "VideoBotsPageRequestAsrModel",
- "VideoBotsPageRequestCitationStyle",
- "VideoBotsPageRequestEmbeddingModel",
- "VideoBotsPageRequestLipsyncModel",
- "VideoBotsPageRequestOpenaiTtsModel",
- "VideoBotsPageRequestOpenaiVoiceName",
- "VideoBotsPageRequestResponseFormatType",
- "VideoBotsPageRequestSelectedModel",
- "VideoBotsPageRequestTranslationModel",
- "VideoBotsPageRequestTtsProvider",
-]
diff --git a/src/gooey/copilot_for_your_enterprise/client.py b/src/gooey/copilot_for_your_enterprise/client.py
index 13a438a..9cb701b 100644
--- a/src/gooey/copilot_for_your_enterprise/client.py
+++ b/src/gooey/copilot_for_your_enterprise/client.py
@@ -7,254 +7,35 @@
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.pydantic_utilities import parse_obj_as
from ..core.request_options import RequestOptions
+from ..errors.bad_request_error import BadRequestError
+from ..errors.internal_server_error import InternalServerError
from ..errors.payment_required_error import PaymentRequiredError
from ..errors.too_many_requests_error import TooManyRequestsError
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.conversation_entry import ConversationEntry
+from ..types.body_async_form_video_bots import BodyAsyncFormVideoBots
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
from ..types.generic_error_response import GenericErrorResponse
from ..types.http_validation_error import HttpValidationError
-from ..types.llm_tools import LlmTools
-from ..types.recipe_function import RecipeFunction
-from ..types.run_settings import RunSettings
-from ..types.sad_talker_settings import SadTalkerSettings
-from ..types.video_bots_page_response import VideoBotsPageResponse
-from .types.video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
-from .types.video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .types.video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
-from .types.video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
-from .types.video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
-from .types.video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
-from .types.video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
-from .types.video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel
-from .types.video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .types.video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
-
-# this is used as the default value for optional parameters
-OMIT = typing.cast(typing.Any, ...)
class CopilotForYourEnterpriseClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
- def async_video_bots(
- self,
- *,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_prompt: typing.Optional[str] = OMIT,
- input_audio: typing.Optional[str] = OMIT,
- input_images: typing.Optional[typing.Sequence[str]] = OMIT,
- input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
- doc_extract_url: typing.Optional[str] = OMIT,
- messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
- bot_script: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT,
- document_model: typing.Optional[str] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- keyword_instructions: typing.Optional[str] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT,
- use_url_shortener: typing.Optional[bool] = OMIT,
- asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT,
- asr_language: typing.Optional[str] = OMIT,
- translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT,
- user_language: typing.Optional[str] = OMIT,
- input_glossary_document: typing.Optional[str] = OMIT,
- output_glossary_document: typing.Optional[str] = OMIT,
- lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT,
- tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT,
- tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> VideoBotsPageResponse:
+ def async_form_video_bots(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormVideoBots:
"""
Parameters
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- input_prompt : typing.Optional[str]
-
- input_audio : typing.Optional[str]
-
- input_images : typing.Optional[typing.Sequence[str]]
-
- input_documents : typing.Optional[typing.Sequence[str]]
-
- doc_extract_url : typing.Optional[str]
- Select a workflow to extract text from documents and images.
-
- messages : typing.Optional[typing.Sequence[ConversationEntry]]
-
- bot_script : typing.Optional[str]
-
- selected_model : typing.Optional[VideoBotsPageRequestSelectedModel]
-
- document_model : typing.Optional[str]
- When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
-
- task_instructions : typing.Optional[str]
-
- query_instructions : typing.Optional[str]
-
- keyword_instructions : typing.Optional[str]
-
- documents : typing.Optional[typing.Sequence[str]]
-
- max_references : typing.Optional[int]
-
- max_context_words : typing.Optional[int]
-
- scroll_jump : typing.Optional[int]
-
- embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel]
-
- dense_weight : typing.Optional[float]
-
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
- citation_style : typing.Optional[VideoBotsPageRequestCitationStyle]
-
- use_url_shortener : typing.Optional[bool]
-
- asr_model : typing.Optional[VideoBotsPageRequestAsrModel]
- Choose a model to transcribe incoming audio messages to text.
-
- asr_language : typing.Optional[str]
- Choose a language to transcribe incoming audio messages to text.
-
- translation_model : typing.Optional[VideoBotsPageRequestTranslationModel]
-
- user_language : typing.Optional[str]
- Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
-
- input_glossary_document : typing.Optional[str]
-
- Translation Glossary for User Langauge -> LLM Language (English)
-
-
- output_glossary_document : typing.Optional[str]
-
- Translation Glossary for LLM Language (English) -> User Langauge
-
-
- lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel]
-
- tools : typing.Optional[typing.Sequence[LlmTools]]
- Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType]
-
- tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider]
-
- uberduck_voice_name : typing.Optional[str]
-
- uberduck_speaking_rate : typing.Optional[float]
-
- google_voice_name : typing.Optional[str]
-
- google_speaking_rate : typing.Optional[float]
-
- google_pitch : typing.Optional[float]
-
- bark_history_prompt : typing.Optional[str]
-
- elevenlabs_voice_name : typing.Optional[str]
- Use `elevenlabs_voice_id` instead
-
- elevenlabs_api_key : typing.Optional[str]
-
- elevenlabs_voice_id : typing.Optional[str]
-
- elevenlabs_model : typing.Optional[str]
-
- elevenlabs_stability : typing.Optional[float]
-
- elevenlabs_similarity_boost : typing.Optional[float]
-
- elevenlabs_style : typing.Optional[float]
-
- elevenlabs_speaker_boost : typing.Optional[bool]
-
- azure_voice_name : typing.Optional[str]
-
- openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName]
-
- openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel]
-
- input_face : typing.Optional[str]
-
- face_padding_top : typing.Optional[int]
-
- face_padding_bottom : typing.Optional[int]
-
- face_padding_left : typing.Optional[int]
-
- face_padding_right : typing.Optional[int]
-
- sadtalker_settings : typing.Optional[SadTalkerSettings]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- VideoBotsPageResponse
+ BodyAsyncFormVideoBots
Successful Response
Examples
@@ -264,81 +45,21 @@ def async_video_bots(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.copilot_for_your_enterprise.async_video_bots()
+ client.copilot_for_your_enterprise.async_form_video_bots()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/video-bots/async",
+ "v3/video-bots/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_prompt": input_prompt,
- "input_audio": input_audio,
- "input_images": input_images,
- "input_documents": input_documents,
- "doc_extract_url": doc_extract_url,
- "messages": messages,
- "bot_script": bot_script,
- "selected_model": selected_model,
- "document_model": document_model,
- "task_instructions": task_instructions,
- "query_instructions": query_instructions,
- "keyword_instructions": keyword_instructions,
- "documents": documents,
- "max_references": max_references,
- "max_context_words": max_context_words,
- "scroll_jump": scroll_jump,
- "embedding_model": embedding_model,
- "dense_weight": dense_weight,
- "citation_style": citation_style,
- "use_url_shortener": use_url_shortener,
- "asr_model": asr_model,
- "asr_language": asr_language,
- "translation_model": translation_model,
- "user_language": user_language,
- "input_glossary_document": input_glossary_document,
- "output_glossary_document": output_glossary_document,
- "lipsync_model": lipsync_model,
- "tools": tools,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "tts_provider": tts_provider,
- "uberduck_voice_name": uberduck_voice_name,
- "uberduck_speaking_rate": uberduck_speaking_rate,
- "google_voice_name": google_voice_name,
- "google_speaking_rate": google_speaking_rate,
- "google_pitch": google_pitch,
- "bark_history_prompt": bark_history_prompt,
- "elevenlabs_voice_name": elevenlabs_voice_name,
- "elevenlabs_api_key": elevenlabs_api_key,
- "elevenlabs_voice_id": elevenlabs_voice_id,
- "elevenlabs_model": elevenlabs_model,
- "elevenlabs_stability": elevenlabs_stability,
- "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
- "elevenlabs_style": elevenlabs_style,
- "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
- "azure_voice_name": azure_voice_name,
- "openai_voice_name": openai_voice_name,
- "openai_tts_model": openai_tts_model,
- "input_face": input_face,
- "face_padding_top": face_padding_top,
- "face_padding_bottom": face_padding_bottom,
- "face_padding_left": face_padding_left,
- "face_padding_right": face_padding_right,
- "sadtalker_settings": sadtalker_settings,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(VideoBotsPageResponse, parse_obj_as(type_=VideoBotsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormVideoBots, parse_obj_as(type_=BodyAsyncFormVideoBots, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -351,6 +72,10 @@ def async_video_bots(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -361,224 +86,20 @@ class AsyncCopilotForYourEnterpriseClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
- async def async_video_bots(
- self,
- *,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_prompt: typing.Optional[str] = OMIT,
- input_audio: typing.Optional[str] = OMIT,
- input_images: typing.Optional[typing.Sequence[str]] = OMIT,
- input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
- doc_extract_url: typing.Optional[str] = OMIT,
- messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
- bot_script: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT,
- document_model: typing.Optional[str] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- keyword_instructions: typing.Optional[str] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT,
- use_url_shortener: typing.Optional[bool] = OMIT,
- asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT,
- asr_language: typing.Optional[str] = OMIT,
- translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT,
- user_language: typing.Optional[str] = OMIT,
- input_glossary_document: typing.Optional[str] = OMIT,
- output_glossary_document: typing.Optional[str] = OMIT,
- lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT,
- tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT,
- tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> VideoBotsPageResponse:
+ async def async_form_video_bots(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormVideoBots:
"""
Parameters
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- input_prompt : typing.Optional[str]
-
- input_audio : typing.Optional[str]
-
- input_images : typing.Optional[typing.Sequence[str]]
-
- input_documents : typing.Optional[typing.Sequence[str]]
-
- doc_extract_url : typing.Optional[str]
- Select a workflow to extract text from documents and images.
-
- messages : typing.Optional[typing.Sequence[ConversationEntry]]
-
- bot_script : typing.Optional[str]
-
- selected_model : typing.Optional[VideoBotsPageRequestSelectedModel]
-
- document_model : typing.Optional[str]
- When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
-
- task_instructions : typing.Optional[str]
-
- query_instructions : typing.Optional[str]
-
- keyword_instructions : typing.Optional[str]
-
- documents : typing.Optional[typing.Sequence[str]]
-
- max_references : typing.Optional[int]
-
- max_context_words : typing.Optional[int]
-
- scroll_jump : typing.Optional[int]
-
- embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel]
-
- dense_weight : typing.Optional[float]
-
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
- citation_style : typing.Optional[VideoBotsPageRequestCitationStyle]
-
- use_url_shortener : typing.Optional[bool]
-
- asr_model : typing.Optional[VideoBotsPageRequestAsrModel]
- Choose a model to transcribe incoming audio messages to text.
-
- asr_language : typing.Optional[str]
- Choose a language to transcribe incoming audio messages to text.
-
- translation_model : typing.Optional[VideoBotsPageRequestTranslationModel]
-
- user_language : typing.Optional[str]
- Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
-
- input_glossary_document : typing.Optional[str]
-
- Translation Glossary for User Langauge -> LLM Language (English)
-
-
- output_glossary_document : typing.Optional[str]
-
- Translation Glossary for LLM Language (English) -> User Langauge
-
-
- lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel]
-
- tools : typing.Optional[typing.Sequence[LlmTools]]
- Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType]
-
- tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider]
-
- uberduck_voice_name : typing.Optional[str]
-
- uberduck_speaking_rate : typing.Optional[float]
-
- google_voice_name : typing.Optional[str]
-
- google_speaking_rate : typing.Optional[float]
-
- google_pitch : typing.Optional[float]
-
- bark_history_prompt : typing.Optional[str]
-
- elevenlabs_voice_name : typing.Optional[str]
- Use `elevenlabs_voice_id` instead
-
- elevenlabs_api_key : typing.Optional[str]
-
- elevenlabs_voice_id : typing.Optional[str]
-
- elevenlabs_model : typing.Optional[str]
-
- elevenlabs_stability : typing.Optional[float]
-
- elevenlabs_similarity_boost : typing.Optional[float]
-
- elevenlabs_style : typing.Optional[float]
-
- elevenlabs_speaker_boost : typing.Optional[bool]
-
- azure_voice_name : typing.Optional[str]
-
- openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName]
-
- openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel]
-
- input_face : typing.Optional[str]
-
- face_padding_top : typing.Optional[int]
-
- face_padding_bottom : typing.Optional[int]
-
- face_padding_left : typing.Optional[int]
-
- face_padding_right : typing.Optional[int]
-
- sadtalker_settings : typing.Optional[SadTalkerSettings]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- VideoBotsPageResponse
+ BodyAsyncFormVideoBots
Successful Response
Examples
@@ -593,84 +114,24 @@ async def async_video_bots(
async def main() -> None:
- await client.copilot_for_your_enterprise.async_video_bots()
+ await client.copilot_for_your_enterprise.async_form_video_bots()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/video-bots/async",
+ "v3/video-bots/async/form",
method="POST",
params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_prompt": input_prompt,
- "input_audio": input_audio,
- "input_images": input_images,
- "input_documents": input_documents,
- "doc_extract_url": doc_extract_url,
- "messages": messages,
- "bot_script": bot_script,
- "selected_model": selected_model,
- "document_model": document_model,
- "task_instructions": task_instructions,
- "query_instructions": query_instructions,
- "keyword_instructions": keyword_instructions,
- "documents": documents,
- "max_references": max_references,
- "max_context_words": max_context_words,
- "scroll_jump": scroll_jump,
- "embedding_model": embedding_model,
- "dense_weight": dense_weight,
- "citation_style": citation_style,
- "use_url_shortener": use_url_shortener,
- "asr_model": asr_model,
- "asr_language": asr_language,
- "translation_model": translation_model,
- "user_language": user_language,
- "input_glossary_document": input_glossary_document,
- "output_glossary_document": output_glossary_document,
- "lipsync_model": lipsync_model,
- "tools": tools,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "tts_provider": tts_provider,
- "uberduck_voice_name": uberduck_voice_name,
- "uberduck_speaking_rate": uberduck_speaking_rate,
- "google_voice_name": google_voice_name,
- "google_speaking_rate": google_speaking_rate,
- "google_pitch": google_pitch,
- "bark_history_prompt": bark_history_prompt,
- "elevenlabs_voice_name": elevenlabs_voice_name,
- "elevenlabs_api_key": elevenlabs_api_key,
- "elevenlabs_voice_id": elevenlabs_voice_id,
- "elevenlabs_model": elevenlabs_model,
- "elevenlabs_stability": elevenlabs_stability,
- "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
- "elevenlabs_style": elevenlabs_style,
- "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
- "azure_voice_name": azure_voice_name,
- "openai_voice_name": openai_voice_name,
- "openai_tts_model": openai_tts_model,
- "input_face": input_face,
- "face_padding_top": face_padding_top,
- "face_padding_bottom": face_padding_bottom,
- "face_padding_left": face_padding_left,
- "face_padding_right": face_padding_right,
- "sadtalker_settings": sadtalker_settings,
- "settings": settings,
- },
request_options=request_options,
- omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(VideoBotsPageResponse, parse_obj_as(type_=VideoBotsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormVideoBots, parse_obj_as(type_=BodyAsyncFormVideoBots, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -683,6 +144,10 @@ async def main() -> None:
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/copilot_for_your_enterprise/types/__init__.py b/src/gooey/copilot_for_your_enterprise/types/__init__.py
deleted file mode 100644
index dd7ed8b..0000000
--- a/src/gooey/copilot_for_your_enterprise/types/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
-from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
-from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
-from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
-from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
-from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
-from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel
-from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
-
-__all__ = [
- "VideoBotsPageRequestAsrModel",
- "VideoBotsPageRequestCitationStyle",
- "VideoBotsPageRequestEmbeddingModel",
- "VideoBotsPageRequestLipsyncModel",
- "VideoBotsPageRequestOpenaiTtsModel",
- "VideoBotsPageRequestOpenaiVoiceName",
- "VideoBotsPageRequestResponseFormatType",
- "VideoBotsPageRequestSelectedModel",
- "VideoBotsPageRequestTranslationModel",
- "VideoBotsPageRequestTtsProvider",
-]
diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py
index be15e61..6006fe1 100644
--- a/src/gooey/core/client_wrapper.py
+++ b/src/gooey/core/client_wrapper.py
@@ -23,7 +23,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "gooeyai",
- "X-Fern-SDK-Version": "0.0.1-beta3",
+ "X-Fern-SDK-Version": "0.0.1-beta5",
}
headers["Authorization"] = f"Bearer {self._get_api_key()}"
return headers
diff --git a/src/gooey/errors/__init__.py b/src/gooey/errors/__init__.py
index 19ea9c4..459b921 100644
--- a/src/gooey/errors/__init__.py
+++ b/src/gooey/errors/__init__.py
@@ -1,7 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
+from .bad_request_error import BadRequestError
+from .internal_server_error import InternalServerError
from .payment_required_error import PaymentRequiredError
from .too_many_requests_error import TooManyRequestsError
from .unprocessable_entity_error import UnprocessableEntityError
-__all__ = ["PaymentRequiredError", "TooManyRequestsError", "UnprocessableEntityError"]
+__all__ = [
+ "BadRequestError",
+ "InternalServerError",
+ "PaymentRequiredError",
+ "TooManyRequestsError",
+ "UnprocessableEntityError",
+]
diff --git a/src/gooey/errors/bad_request_error.py b/src/gooey/errors/bad_request_error.py
new file mode 100644
index 0000000..02f5144
--- /dev/null
+++ b/src/gooey/errors/bad_request_error.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.api_error import ApiError
+from ..types.generic_error_response import GenericErrorResponse
+
+
+class BadRequestError(ApiError):
+ def __init__(self, body: GenericErrorResponse):
+ super().__init__(status_code=400, body=body)
diff --git a/src/gooey/errors/internal_server_error.py b/src/gooey/errors/internal_server_error.py
new file mode 100644
index 0000000..3be52c0
--- /dev/null
+++ b/src/gooey/errors/internal_server_error.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.api_error import ApiError
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+
+
+class InternalServerError(ApiError):
+ def __init__(self, body: FailedReponseModelV2):
+ super().__init__(status_code=500, body=body)
diff --git a/src/gooey/evaluator/__init__.py b/src/gooey/evaluator/__init__.py
index 7ceefb0..f3ea265 100644
--- a/src/gooey/evaluator/__init__.py
+++ b/src/gooey/evaluator/__init__.py
@@ -1,5 +1,2 @@
# This file was auto-generated by Fern from our API Definition.
-from .types import BulkEvalPageRequestResponseFormatType, BulkEvalPageRequestSelectedModel
-
-__all__ = ["BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel"]
diff --git a/src/gooey/evaluator/client.py b/src/gooey/evaluator/client.py
index 7731555..1e4720f 100644
--- a/src/gooey/evaluator/client.py
+++ b/src/gooey/evaluator/client.py
@@ -7,96 +7,35 @@
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.pydantic_utilities import parse_obj_as
from ..core.request_options import RequestOptions
+from ..errors.bad_request_error import BadRequestError
+from ..errors.internal_server_error import InternalServerError
from ..errors.payment_required_error import PaymentRequiredError
from ..errors.too_many_requests_error import TooManyRequestsError
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.agg_function import AggFunction
-from ..types.bulk_eval_page_response import BulkEvalPageResponse
-from ..types.eval_prompt import EvalPrompt
+from ..types.body_async_form_bulk_eval import BodyAsyncFormBulkEval
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
from ..types.generic_error_response import GenericErrorResponse
from ..types.http_validation_error import HttpValidationError
-from ..types.recipe_function import RecipeFunction
-from ..types.run_settings import RunSettings
-from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
-from .types.bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel
-
-# this is used as the default value for optional parameters
-OMIT = typing.cast(typing.Any, ...)
class EvaluatorClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
- def async_bulk_eval(
- self,
- *,
- documents: typing.Sequence[str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT,
- agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT,
- selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> BulkEvalPageResponse:
+ def async_form_bulk_eval(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormBulkEval:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
- Upload or link to a CSV or google sheet that contains your sample input data.
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
- Remember to includes header names in your CSV too.
-
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]]
-
- Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
- _The `columns` dictionary can be used to reference the spreadsheet columns._
-
-
- agg_functions : typing.Optional[typing.Sequence[AggFunction]]
-
- Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
-
-
- selected_model : typing.Optional[BulkEvalPageRequestSelectedModel]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BulkEvalPageResponse
+ BodyAsyncFormBulkEval
Successful Response
Examples
@@ -106,35 +45,18 @@ def async_bulk_eval(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.evaluator.async_bulk_eval(
- documents=["documents"],
- )
+ client.evaluator.async_form_bulk_eval()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/bulk-eval/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "documents": documents,
- "eval_prompts": eval_prompts,
- "agg_functions": agg_functions,
- "selected_model": selected_model,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/bulk-eval/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BulkEvalPageResponse, parse_obj_as(type_=BulkEvalPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormBulkEval, parse_obj_as(type_=BodyAsyncFormBulkEval, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -147,6 +69,10 @@ def async_bulk_eval(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -157,75 +83,20 @@ class AsyncEvaluatorClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
- async def async_bulk_eval(
- self,
- *,
- documents: typing.Sequence[str],
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT,
- agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT,
- selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> BulkEvalPageResponse:
+ async def async_form_bulk_eval(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormBulkEval:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
- Upload or link to a CSV or google sheet that contains your sample input data.
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
- Remember to includes header names in your CSV too.
-
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]]
-
- Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
- _The `columns` dictionary can be used to reference the spreadsheet columns._
-
-
- agg_functions : typing.Optional[typing.Sequence[AggFunction]]
-
- Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
-
-
- selected_model : typing.Optional[BulkEvalPageRequestSelectedModel]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BulkEvalPageResponse
+ BodyAsyncFormBulkEval
Successful Response
Examples
@@ -240,38 +111,21 @@ async def async_bulk_eval(
async def main() -> None:
- await client.evaluator.async_bulk_eval(
- documents=["documents"],
- )
+ await client.evaluator.async_form_bulk_eval()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/bulk-eval/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "documents": documents,
- "eval_prompts": eval_prompts,
- "agg_functions": agg_functions,
- "selected_model": selected_model,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/bulk-eval/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BulkEvalPageResponse, parse_obj_as(type_=BulkEvalPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormBulkEval, parse_obj_as(type_=BodyAsyncFormBulkEval, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -284,6 +138,10 @@ async def main() -> None:
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/evaluator/types/__init__.py b/src/gooey/evaluator/types/__init__.py
deleted file mode 100644
index 67f1384..0000000
--- a/src/gooey/evaluator/types/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
-from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel
-
-__all__ = ["BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel"]
diff --git a/src/gooey/functions/client.py b/src/gooey/functions/client.py
index d15f376..79ae564 100644
--- a/src/gooey/functions/client.py
+++ b/src/gooey/functions/client.py
@@ -7,50 +7,35 @@
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.pydantic_utilities import parse_obj_as
from ..core.request_options import RequestOptions
+from ..errors.bad_request_error import BadRequestError
+from ..errors.internal_server_error import InternalServerError
from ..errors.payment_required_error import PaymentRequiredError
from ..errors.too_many_requests_error import TooManyRequestsError
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.functions_page_response import FunctionsPageResponse
+from ..types.body_async_form_functions import BodyAsyncFormFunctions
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
from ..types.generic_error_response import GenericErrorResponse
from ..types.http_validation_error import HttpValidationError
-from ..types.run_settings import RunSettings
-
-# this is used as the default value for optional parameters
-OMIT = typing.cast(typing.Any, ...)
class FunctionsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
- def async_functions(
- self,
- *,
- example_id: typing.Optional[str] = None,
- code: typing.Optional[str] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> FunctionsPageResponse:
+ def async_form_functions(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormFunctions:
"""
Parameters
----------
example_id : typing.Optional[str]
- code : typing.Optional[str]
- The JS code to be executed.
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used in the code
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- FunctionsPageResponse
+ BodyAsyncFormFunctions
Successful Response
Examples
@@ -60,19 +45,18 @@ def async_functions(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.functions.async_functions()
+ client.functions.async_form_functions()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/functions/async",
- method="POST",
- params={"example_id": example_id},
- json={"code": code, "variables": variables, "settings": settings},
- request_options=request_options,
- omit=OMIT,
+ "v3/functions/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(FunctionsPageResponse, parse_obj_as(type_=FunctionsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormFunctions, parse_obj_as(type_=BodyAsyncFormFunctions, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -85,6 +69,10 @@ def async_functions(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -126,34 +114,20 @@ class AsyncFunctionsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
- async def async_functions(
- self,
- *,
- example_id: typing.Optional[str] = None,
- code: typing.Optional[str] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> FunctionsPageResponse:
+ async def async_form_functions(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormFunctions:
"""
Parameters
----------
example_id : typing.Optional[str]
- code : typing.Optional[str]
- The JS code to be executed.
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used in the code
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- FunctionsPageResponse
+ BodyAsyncFormFunctions
Successful Response
Examples
@@ -168,22 +142,21 @@ async def async_functions(
async def main() -> None:
- await client.functions.async_functions()
+ await client.functions.async_form_functions()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/functions/async",
- method="POST",
- params={"example_id": example_id},
- json={"code": code, "variables": variables, "settings": settings},
- request_options=request_options,
- omit=OMIT,
+ "v3/functions/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(FunctionsPageResponse, parse_obj_as(type_=FunctionsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormFunctions, parse_obj_as(type_=BodyAsyncFormFunctions, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -196,6 +169,10 @@ async def main() -> None:
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/lip_syncing/__init__.py b/src/gooey/lip_syncing/__init__.py
index 4d094b1..f3ea265 100644
--- a/src/gooey/lip_syncing/__init__.py
+++ b/src/gooey/lip_syncing/__init__.py
@@ -1,5 +1,2 @@
# This file was auto-generated by Fern from our API Definition.
-from .types import LipsyncPageRequestSelectedModel
-
-__all__ = ["LipsyncPageRequestSelectedModel"]
diff --git a/src/gooey/lip_syncing/client.py b/src/gooey/lip_syncing/client.py
index 2e99a69..119f062 100644
--- a/src/gooey/lip_syncing/client.py
+++ b/src/gooey/lip_syncing/client.py
@@ -7,76 +7,35 @@
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.pydantic_utilities import parse_obj_as
from ..core.request_options import RequestOptions
+from ..errors.bad_request_error import BadRequestError
+from ..errors.internal_server_error import InternalServerError
from ..errors.payment_required_error import PaymentRequiredError
from ..errors.too_many_requests_error import TooManyRequestsError
from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.body_async_form_lipsync import BodyAsyncFormLipsync
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
from ..types.generic_error_response import GenericErrorResponse
from ..types.http_validation_error import HttpValidationError
-from ..types.lipsync_page_response import LipsyncPageResponse
-from ..types.recipe_function import RecipeFunction
-from ..types.run_settings import RunSettings
-from ..types.sad_talker_settings import SadTalkerSettings
-from .types.lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
-
-# this is used as the default value for optional parameters
-OMIT = typing.cast(typing.Any, ...)
class LipSyncingClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
- def async_lipsync(
- self,
- *,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT,
- input_audio: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> LipsyncPageResponse:
+ def async_form_lipsync(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormLipsync:
"""
Parameters
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- input_face : typing.Optional[str]
-
- face_padding_top : typing.Optional[int]
-
- face_padding_bottom : typing.Optional[int]
-
- face_padding_left : typing.Optional[int]
-
- face_padding_right : typing.Optional[int]
-
- sadtalker_settings : typing.Optional[SadTalkerSettings]
-
- selected_model : typing.Optional[LipsyncPageRequestSelectedModel]
-
- input_audio : typing.Optional[str]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- LipsyncPageResponse
+ BodyAsyncFormLipsync
Successful Response
Examples
@@ -86,31 +45,18 @@ def async_lipsync(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.lip_syncing.async_lipsync()
+ client.lip_syncing.async_form_lipsync()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/Lipsync/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_face": input_face,
- "face_padding_top": face_padding_top,
- "face_padding_bottom": face_padding_bottom,
- "face_padding_left": face_padding_left,
- "face_padding_right": face_padding_right,
- "sadtalker_settings": sadtalker_settings,
- "selected_model": selected_model,
- "input_audio": input_audio,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/Lipsync/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(LipsyncPageResponse, parse_obj_as(type_=LipsyncPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormLipsync, parse_obj_as(type_=BodyAsyncFormLipsync, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -123,6 +69,10 @@ def async_lipsync(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -133,57 +83,20 @@ class AsyncLipSyncingClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
- async def async_lipsync(
- self,
- *,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT,
- input_audio: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> LipsyncPageResponse:
+ async def async_form_lipsync(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormLipsync:
"""
Parameters
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- input_face : typing.Optional[str]
-
- face_padding_top : typing.Optional[int]
-
- face_padding_bottom : typing.Optional[int]
-
- face_padding_left : typing.Optional[int]
-
- face_padding_right : typing.Optional[int]
-
- sadtalker_settings : typing.Optional[SadTalkerSettings]
-
- selected_model : typing.Optional[LipsyncPageRequestSelectedModel]
-
- input_audio : typing.Optional[str]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- LipsyncPageResponse
+ BodyAsyncFormLipsync
Successful Response
Examples
@@ -198,34 +111,21 @@ async def async_lipsync(
async def main() -> None:
- await client.lip_syncing.async_lipsync()
+ await client.lip_syncing.async_form_lipsync()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/Lipsync/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_face": input_face,
- "face_padding_top": face_padding_top,
- "face_padding_bottom": face_padding_bottom,
- "face_padding_left": face_padding_left,
- "face_padding_right": face_padding_right,
- "sadtalker_settings": sadtalker_settings,
- "selected_model": selected_model,
- "input_audio": input_audio,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/Lipsync/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(LipsyncPageResponse, parse_obj_as(type_=LipsyncPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormLipsync, parse_obj_as(type_=BodyAsyncFormLipsync, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -238,6 +138,10 @@ async def main() -> None:
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/lip_syncing/types/__init__.py b/src/gooey/lip_syncing/types/__init__.py
deleted file mode 100644
index e7e3b85..0000000
--- a/src/gooey/lip_syncing/types/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
-
-__all__ = ["LipsyncPageRequestSelectedModel"]
diff --git a/src/gooey/smart_gpt/__init__.py b/src/gooey/smart_gpt/__init__.py
index fce5f3e..f3ea265 100644
--- a/src/gooey/smart_gpt/__init__.py
+++ b/src/gooey/smart_gpt/__init__.py
@@ -1,5 +1,2 @@
# This file was auto-generated by Fern from our API Definition.
-from .types import SmartGptPageRequestResponseFormatType, SmartGptPageRequestSelectedModel
-
-__all__ = ["SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel"]
diff --git a/src/gooey/smart_gpt/client.py b/src/gooey/smart_gpt/client.py
index 426212b..d06562d 100644
--- a/src/gooey/smart_gpt/client.py
+++ b/src/gooey/smart_gpt/client.py
@@ -7,85 +7,35 @@
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.pydantic_utilities import parse_obj_as
from ..core.request_options import RequestOptions
+from ..errors.bad_request_error import BadRequestError
+from ..errors.internal_server_error import InternalServerError
from ..errors.payment_required_error import PaymentRequiredError
from ..errors.too_many_requests_error import TooManyRequestsError
from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.body_async_form_smart_gpt import BodyAsyncFormSmartGpt
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
from ..types.generic_error_response import GenericErrorResponse
from ..types.http_validation_error import HttpValidationError
-from ..types.recipe_function import RecipeFunction
-from ..types.run_settings import RunSettings
-from ..types.smart_gpt_page_response import SmartGptPageResponse
-from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
-from .types.smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
-
-# this is used as the default value for optional parameters
-OMIT = typing.cast(typing.Any, ...)
class SmartGptClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
- def async_smart_gpt(
- self,
- *,
- input_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- cot_prompt: typing.Optional[str] = OMIT,
- reflexion_prompt: typing.Optional[str] = OMIT,
- dera_prompt: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> SmartGptPageResponse:
+ def async_form_smart_gpt(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormSmartGpt:
"""
Parameters
----------
- input_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- cot_prompt : typing.Optional[str]
-
- reflexion_prompt : typing.Optional[str]
-
- dera_prompt : typing.Optional[str]
-
- selected_model : typing.Optional[SmartGptPageRequestSelectedModel]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- SmartGptPageResponse
+ BodyAsyncFormSmartGpt
Successful Response
Examples
@@ -95,36 +45,18 @@ def async_smart_gpt(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.smart_gpt.async_smart_gpt(
- input_prompt="input_prompt",
- )
+ client.smart_gpt.async_form_smart_gpt()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/SmartGPT/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_prompt": input_prompt,
- "cot_prompt": cot_prompt,
- "reflexion_prompt": reflexion_prompt,
- "dera_prompt": dera_prompt,
- "selected_model": selected_model,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/SmartGPT/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SmartGptPageResponse, parse_obj_as(type_=SmartGptPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormSmartGpt, parse_obj_as(type_=BodyAsyncFormSmartGpt, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -137,6 +69,10 @@ def async_smart_gpt(
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -178,66 +114,20 @@ class AsyncSmartGptClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
- async def async_smart_gpt(
- self,
- *,
- input_prompt: str,
- example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- cot_prompt: typing.Optional[str] = OMIT,
- reflexion_prompt: typing.Optional[str] = OMIT,
- dera_prompt: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
- request_options: typing.Optional[RequestOptions] = None
- ) -> SmartGptPageResponse:
+ async def async_form_smart_gpt(
+ self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
+ ) -> BodyAsyncFormSmartGpt:
"""
Parameters
----------
- input_prompt : str
-
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
-
- variables : typing.Optional[typing.Dict[str, typing.Any]]
- Variables to be used as Jinja prompt templates and in functions as arguments
-
- cot_prompt : typing.Optional[str]
-
- reflexion_prompt : typing.Optional[str]
-
- dera_prompt : typing.Optional[str]
-
- selected_model : typing.Optional[SmartGptPageRequestSelectedModel]
-
- avoid_repetition : typing.Optional[bool]
-
- num_outputs : typing.Optional[int]
-
- quality : typing.Optional[float]
-
- max_tokens : typing.Optional[int]
-
- sampling_temperature : typing.Optional[float]
-
- response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType]
-
- settings : typing.Optional[RunSettings]
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- SmartGptPageResponse
+ BodyAsyncFormSmartGpt
Successful Response
Examples
@@ -252,39 +142,21 @@ async def async_smart_gpt(
async def main() -> None:
- await client.smart_gpt.async_smart_gpt(
- input_prompt="input_prompt",
- )
+ await client.smart_gpt.async_form_smart_gpt()
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/SmartGPT/async",
- method="POST",
- params={"example_id": example_id},
- json={
- "functions": functions,
- "variables": variables,
- "input_prompt": input_prompt,
- "cot_prompt": cot_prompt,
- "reflexion_prompt": reflexion_prompt,
- "dera_prompt": dera_prompt,
- "selected_model": selected_model,
- "avoid_repetition": avoid_repetition,
- "num_outputs": num_outputs,
- "quality": quality,
- "max_tokens": max_tokens,
- "sampling_temperature": sampling_temperature,
- "response_format_type": response_format_type,
- "settings": settings,
- },
- request_options=request_options,
- omit=OMIT,
+ "v3/SmartGPT/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SmartGptPageResponse, parse_obj_as(type_=SmartGptPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(BodyAsyncFormSmartGpt, parse_obj_as(type_=BodyAsyncFormSmartGpt, object_=_response.json())) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
if _response.status_code == 402:
raise PaymentRequiredError(
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
@@ -297,6 +169,10 @@ async def main() -> None:
raise TooManyRequestsError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
)
+ if _response.status_code == 500:
+ raise InternalServerError(
+ typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/smart_gpt/types/__init__.py b/src/gooey/smart_gpt/types/__init__.py
deleted file mode 100644
index 3032d41..0000000
--- a/src/gooey/smart_gpt/types/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
-from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
-
-__all__ = ["SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel"]
diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py
index d49ace4..64690f3 100644
--- a/src/gooey/types/__init__.py
+++ b/src/gooey/types/__init__.py
@@ -9,6 +9,7 @@
from .asr_output_json import AsrOutputJson
from .asr_page_output import AsrPageOutput
from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem
+from .asr_page_request import AsrPageRequest
from .asr_page_request_output_format import AsrPageRequestOutputFormat
from .asr_page_request_selected_model import AsrPageRequestSelectedModel
from .asr_page_request_translation_model import AsrPageRequestTranslationModel
@@ -16,11 +17,48 @@
from .asr_page_status_response import AsrPageStatusResponse
from .async_api_response_model_v3 import AsyncApiResponseModelV3
from .balance_response import BalanceResponse
+from .body_async_form_art_qr_code import BodyAsyncFormArtQrCode
+from .body_async_form_asr import BodyAsyncFormAsr
+from .body_async_form_bulk_eval import BodyAsyncFormBulkEval
+from .body_async_form_bulk_runner import BodyAsyncFormBulkRunner
+from .body_async_form_chyron_plant import BodyAsyncFormChyronPlant
+from .body_async_form_compare_ai_upscalers import BodyAsyncFormCompareAiUpscalers
+from .body_async_form_compare_llm import BodyAsyncFormCompareLlm
+from .body_async_form_compare_text2img import BodyAsyncFormCompareText2Img
+from .body_async_form_deforum_sd import BodyAsyncFormDeforumSd
+from .body_async_form_doc_extract import BodyAsyncFormDocExtract
+from .body_async_form_doc_search import BodyAsyncFormDocSearch
+from .body_async_form_doc_summary import BodyAsyncFormDocSummary
+from .body_async_form_email_face_inpainting import BodyAsyncFormEmailFaceInpainting
+from .body_async_form_embeddings import BodyAsyncFormEmbeddings
+from .body_async_form_face_inpainting import BodyAsyncFormFaceInpainting
+from .body_async_form_functions import BodyAsyncFormFunctions
+from .body_async_form_google_gpt import BodyAsyncFormGoogleGpt
+from .body_async_form_google_image_gen import BodyAsyncFormGoogleImageGen
+from .body_async_form_image_segmentation import BodyAsyncFormImageSegmentation
+from .body_async_form_img2img import BodyAsyncFormImg2Img
+from .body_async_form_letter_writer import BodyAsyncFormLetterWriter
+from .body_async_form_lipsync import BodyAsyncFormLipsync
+from .body_async_form_lipsync_tts import BodyAsyncFormLipsyncTts
+from .body_async_form_object_inpainting import BodyAsyncFormObjectInpainting
+from .body_async_form_related_qna_maker import BodyAsyncFormRelatedQnaMaker
+from .body_async_form_related_qna_maker_doc import BodyAsyncFormRelatedQnaMakerDoc
+from .body_async_form_seo_summary import BodyAsyncFormSeoSummary
+from .body_async_form_smart_gpt import BodyAsyncFormSmartGpt
+from .body_async_form_social_lookup_email import BodyAsyncFormSocialLookupEmail
+from .body_async_form_text2audio import BodyAsyncFormText2Audio
+from .body_async_form_text_to_speech import BodyAsyncFormTextToSpeech
+from .body_async_form_translate import BodyAsyncFormTranslate
+from .body_async_form_video_bots import BodyAsyncFormVideoBots
from .bot_broadcast_filters import BotBroadcastFilters
from .bulk_eval_page_output import BulkEvalPageOutput
+from .bulk_eval_page_request import BulkEvalPageRequest
+from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
+from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel
from .bulk_eval_page_response import BulkEvalPageResponse
from .bulk_eval_page_status_response import BulkEvalPageStatusResponse
from .bulk_runner_page_output import BulkRunnerPageOutput
+from .bulk_runner_page_request import BulkRunnerPageRequest
from .bulk_runner_page_response import BulkRunnerPageResponse
from .bulk_runner_page_status_response import BulkRunnerPageStatusResponse
from .button_pressed import ButtonPressed
@@ -33,16 +71,19 @@
from .chyron_plant_page_response import ChyronPlantPageResponse
from .chyron_plant_page_status_response import ChyronPlantPageStatusResponse
from .compare_llm_page_output import CompareLlmPageOutput
+from .compare_llm_page_request import CompareLlmPageRequest
from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem
from .compare_llm_page_response import CompareLlmPageResponse
from .compare_llm_page_status_response import CompareLlmPageStatusResponse
from .compare_text2img_page_output import CompareText2ImgPageOutput
+from .compare_text2img_page_request import CompareText2ImgPageRequest
from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler
from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
from .compare_text2img_page_response import CompareText2ImgPageResponse
from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse
from .compare_upscaler_page_output import CompareUpscalerPageOutput
+from .compare_upscaler_page_request import CompareUpscalerPageRequest
from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
from .compare_upscaler_page_response import CompareUpscalerPageResponse
from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse
@@ -59,16 +100,19 @@
from .conversation_start import ConversationStart
from .create_stream_response import CreateStreamResponse
from .deforum_sd_page_output import DeforumSdPageOutput
+from .deforum_sd_page_request import DeforumSdPageRequest
from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
from .deforum_sd_page_response import DeforumSdPageResponse
from .deforum_sd_page_status_response import DeforumSdPageStatusResponse
from .doc_extract_page_output import DocExtractPageOutput
+from .doc_extract_page_request import DocExtractPageRequest
from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel
from .doc_extract_page_response import DocExtractPageResponse
from .doc_extract_page_status_response import DocExtractPageStatusResponse
from .doc_search_page_output import DocSearchPageOutput
+from .doc_search_page_request import DocSearchPageRequest
from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel
from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
@@ -77,21 +121,25 @@
from .doc_search_page_response import DocSearchPageResponse
from .doc_search_page_status_response import DocSearchPageStatusResponse
from .doc_summary_page_output import DocSummaryPageOutput
+from .doc_summary_page_request import DocSummaryPageRequest
from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel
from .doc_summary_page_response import DocSummaryPageResponse
from .doc_summary_page_status_response import DocSummaryPageStatusResponse
from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
+from .email_face_inpainting_page_request import EmailFaceInpaintingPageRequest
from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
from .email_face_inpainting_page_response import EmailFaceInpaintingPageResponse
from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse
from .embeddings_page_output import EmbeddingsPageOutput
+from .embeddings_page_request import EmbeddingsPageRequest
from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
from .embeddings_page_response import EmbeddingsPageResponse
from .embeddings_page_status_response import EmbeddingsPageStatusResponse
from .eval_prompt import EvalPrompt
from .face_inpainting_page_output import FaceInpaintingPageOutput
+from .face_inpainting_page_request import FaceInpaintingPageRequest
from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
from .face_inpainting_page_response import FaceInpaintingPageResponse
from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse
@@ -99,28 +147,33 @@
from .failed_response_detail import FailedResponseDetail
from .final_response import FinalResponse
from .functions_page_output import FunctionsPageOutput
+from .functions_page_request import FunctionsPageRequest
from .functions_page_response import FunctionsPageResponse
from .functions_page_status_response import FunctionsPageStatusResponse
from .generic_error_response import GenericErrorResponse
from .generic_error_response_detail import GenericErrorResponseDetail
from .google_gpt_page_output import GoogleGptPageOutput
+from .google_gpt_page_request import GoogleGptPageRequest
from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel
from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType
from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel
from .google_gpt_page_response import GoogleGptPageResponse
from .google_gpt_page_status_response import GoogleGptPageStatusResponse
from .google_image_gen_page_output import GoogleImageGenPageOutput
+from .google_image_gen_page_request import GoogleImageGenPageRequest
from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
from .google_image_gen_page_response import GoogleImageGenPageResponse
from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse
from .http_validation_error import HttpValidationError
from .image_segmentation_page_output import ImageSegmentationPageOutput
+from .image_segmentation_page_request import ImageSegmentationPageRequest
from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
from .image_segmentation_page_response import ImageSegmentationPageResponse
from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse
from .image_url import ImageUrl
from .image_url_detail import ImageUrlDetail
from .img2img_page_output import Img2ImgPageOutput
+from .img2img_page_request import Img2ImgPageRequest
from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem
from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
@@ -131,9 +184,12 @@
from .letter_writer_page_response import LetterWriterPageResponse
from .letter_writer_page_status_response import LetterWriterPageStatusResponse
from .lipsync_page_output import LipsyncPageOutput
+from .lipsync_page_request import LipsyncPageRequest
+from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
from .lipsync_page_response import LipsyncPageResponse
from .lipsync_page_status_response import LipsyncPageStatusResponse
from .lipsync_tts_page_output import LipsyncTtsPageOutput
+from .lipsync_tts_page_request import LipsyncTtsPageRequest
from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
@@ -143,12 +199,14 @@
from .llm_tools import LlmTools
from .message_part import MessagePart
from .object_inpainting_page_output import ObjectInpaintingPageOutput
+from .object_inpainting_page_request import ObjectInpaintingPageRequest
from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
from .object_inpainting_page_response import ObjectInpaintingPageResponse
from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse
from .prompt_tree_node import PromptTreeNode
from .prompt_tree_node_prompt import PromptTreeNodePrompt
from .qr_code_generator_page_output import QrCodeGeneratorPageOutput
+from .qr_code_generator_page_request import QrCodeGeneratorPageRequest
from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
)
@@ -165,6 +223,7 @@
from .related_doc_search_response import RelatedDocSearchResponse
from .related_google_gpt_response import RelatedGoogleGptResponse
from .related_qn_a_doc_page_output import RelatedQnADocPageOutput
+from .related_qn_a_doc_page_request import RelatedQnADocPageRequest
from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel
from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
@@ -173,6 +232,7 @@
from .related_qn_a_doc_page_response import RelatedQnADocPageResponse
from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse
from .related_qn_a_page_output import RelatedQnAPageOutput
+from .related_qn_a_page_request import RelatedQnAPageRequest
from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel
@@ -189,6 +249,7 @@
from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess
from .search_reference import SearchReference
from .seo_summary_page_output import SeoSummaryPageOutput
+from .seo_summary_page_request import SeoSummaryPageRequest
from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType
from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel
from .seo_summary_page_response import SeoSummaryPageResponse
@@ -196,18 +257,24 @@
from .serp_search_location import SerpSearchLocation
from .serp_search_type import SerpSearchType
from .smart_gpt_page_output import SmartGptPageOutput
+from .smart_gpt_page_request import SmartGptPageRequest
+from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
+from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
from .smart_gpt_page_response import SmartGptPageResponse
from .smart_gpt_page_status_response import SmartGptPageStatusResponse
from .social_lookup_email_page_output import SocialLookupEmailPageOutput
+from .social_lookup_email_page_request import SocialLookupEmailPageRequest
from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel
from .social_lookup_email_page_response import SocialLookupEmailPageResponse
from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse
from .stream_error import StreamError
from .text2audio_page_output import Text2AudioPageOutput
+from .text2audio_page_request import Text2AudioPageRequest
from .text2audio_page_response import Text2AudioPageResponse
from .text2audio_page_status_response import Text2AudioPageStatusResponse
from .text_to_speech_page_output import TextToSpeechPageOutput
+from .text_to_speech_page_request import TextToSpeechPageRequest
from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
@@ -215,6 +282,7 @@
from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse
from .training_data_model import TrainingDataModel
from .translation_page_output import TranslationPageOutput
+from .translation_page_request import TranslationPageRequest
from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
from .translation_page_response import TranslationPageResponse
from .translation_page_status_response import TranslationPageStatusResponse
@@ -224,6 +292,17 @@
from .video_bots_page_output import VideoBotsPageOutput
from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery
from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt
+from .video_bots_page_request import VideoBotsPageRequest
+from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
+from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
+from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
+from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
+from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
+from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
+from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel
+from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
+from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
from .video_bots_page_response import VideoBotsPageResponse
from .video_bots_page_status_response import VideoBotsPageStatusResponse
@@ -237,6 +316,7 @@
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
+ "AsrPageRequest",
"AsrPageRequestOutputFormat",
"AsrPageRequestSelectedModel",
"AsrPageRequestTranslationModel",
@@ -244,11 +324,48 @@
"AsrPageStatusResponse",
"AsyncApiResponseModelV3",
"BalanceResponse",
+ "BodyAsyncFormArtQrCode",
+ "BodyAsyncFormAsr",
+ "BodyAsyncFormBulkEval",
+ "BodyAsyncFormBulkRunner",
+ "BodyAsyncFormChyronPlant",
+ "BodyAsyncFormCompareAiUpscalers",
+ "BodyAsyncFormCompareLlm",
+ "BodyAsyncFormCompareText2Img",
+ "BodyAsyncFormDeforumSd",
+ "BodyAsyncFormDocExtract",
+ "BodyAsyncFormDocSearch",
+ "BodyAsyncFormDocSummary",
+ "BodyAsyncFormEmailFaceInpainting",
+ "BodyAsyncFormEmbeddings",
+ "BodyAsyncFormFaceInpainting",
+ "BodyAsyncFormFunctions",
+ "BodyAsyncFormGoogleGpt",
+ "BodyAsyncFormGoogleImageGen",
+ "BodyAsyncFormImageSegmentation",
+ "BodyAsyncFormImg2Img",
+ "BodyAsyncFormLetterWriter",
+ "BodyAsyncFormLipsync",
+ "BodyAsyncFormLipsyncTts",
+ "BodyAsyncFormObjectInpainting",
+ "BodyAsyncFormRelatedQnaMaker",
+ "BodyAsyncFormRelatedQnaMakerDoc",
+ "BodyAsyncFormSeoSummary",
+ "BodyAsyncFormSmartGpt",
+ "BodyAsyncFormSocialLookupEmail",
+ "BodyAsyncFormText2Audio",
+ "BodyAsyncFormTextToSpeech",
+ "BodyAsyncFormTranslate",
+ "BodyAsyncFormVideoBots",
"BotBroadcastFilters",
"BulkEvalPageOutput",
+ "BulkEvalPageRequest",
+ "BulkEvalPageRequestResponseFormatType",
+ "BulkEvalPageRequestSelectedModel",
"BulkEvalPageResponse",
"BulkEvalPageStatusResponse",
"BulkRunnerPageOutput",
+ "BulkRunnerPageRequest",
"BulkRunnerPageResponse",
"BulkRunnerPageStatusResponse",
"ButtonPressed",
@@ -261,16 +378,19 @@
"ChyronPlantPageResponse",
"ChyronPlantPageStatusResponse",
"CompareLlmPageOutput",
+ "CompareLlmPageRequest",
"CompareLlmPageRequestResponseFormatType",
"CompareLlmPageRequestSelectedModelsItem",
"CompareLlmPageResponse",
"CompareLlmPageStatusResponse",
"CompareText2ImgPageOutput",
+ "CompareText2ImgPageRequest",
"CompareText2ImgPageRequestScheduler",
"CompareText2ImgPageRequestSelectedModelsItem",
"CompareText2ImgPageResponse",
"CompareText2ImgPageStatusResponse",
"CompareUpscalerPageOutput",
+ "CompareUpscalerPageRequest",
"CompareUpscalerPageRequestSelectedModelsItem",
"CompareUpscalerPageResponse",
"CompareUpscalerPageStatusResponse",
@@ -285,16 +405,19 @@
"ConversationStart",
"CreateStreamResponse",
"DeforumSdPageOutput",
+ "DeforumSdPageRequest",
"DeforumSdPageRequestSelectedModel",
"DeforumSdPageResponse",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
+ "DocExtractPageRequest",
"DocExtractPageRequestResponseFormatType",
"DocExtractPageRequestSelectedAsrModel",
"DocExtractPageRequestSelectedModel",
"DocExtractPageResponse",
"DocExtractPageStatusResponse",
"DocSearchPageOutput",
+ "DocSearchPageRequest",
"DocSearchPageRequestCitationStyle",
"DocSearchPageRequestEmbeddingModel",
"DocSearchPageRequestKeywordQuery",
@@ -303,21 +426,25 @@
"DocSearchPageResponse",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
+ "DocSummaryPageRequest",
"DocSummaryPageRequestResponseFormatType",
"DocSummaryPageRequestSelectedAsrModel",
"DocSummaryPageRequestSelectedModel",
"DocSummaryPageResponse",
"DocSummaryPageStatusResponse",
"EmailFaceInpaintingPageOutput",
+ "EmailFaceInpaintingPageRequest",
"EmailFaceInpaintingPageRequestSelectedModel",
"EmailFaceInpaintingPageResponse",
"EmailFaceInpaintingPageStatusResponse",
"EmbeddingsPageOutput",
+ "EmbeddingsPageRequest",
"EmbeddingsPageRequestSelectedModel",
"EmbeddingsPageResponse",
"EmbeddingsPageStatusResponse",
"EvalPrompt",
"FaceInpaintingPageOutput",
+ "FaceInpaintingPageRequest",
"FaceInpaintingPageRequestSelectedModel",
"FaceInpaintingPageResponse",
"FaceInpaintingPageStatusResponse",
@@ -325,28 +452,33 @@
"FailedResponseDetail",
"FinalResponse",
"FunctionsPageOutput",
+ "FunctionsPageRequest",
"FunctionsPageResponse",
"FunctionsPageStatusResponse",
"GenericErrorResponse",
"GenericErrorResponseDetail",
"GoogleGptPageOutput",
+ "GoogleGptPageRequest",
"GoogleGptPageRequestEmbeddingModel",
"GoogleGptPageRequestResponseFormatType",
"GoogleGptPageRequestSelectedModel",
"GoogleGptPageResponse",
"GoogleGptPageStatusResponse",
"GoogleImageGenPageOutput",
+ "GoogleImageGenPageRequest",
"GoogleImageGenPageRequestSelectedModel",
"GoogleImageGenPageResponse",
"GoogleImageGenPageStatusResponse",
"HttpValidationError",
"ImageSegmentationPageOutput",
+ "ImageSegmentationPageRequest",
"ImageSegmentationPageRequestSelectedModel",
"ImageSegmentationPageResponse",
"ImageSegmentationPageStatusResponse",
"ImageUrl",
"ImageUrlDetail",
"Img2ImgPageOutput",
+ "Img2ImgPageRequest",
"Img2ImgPageRequestSelectedControlnetModel",
"Img2ImgPageRequestSelectedControlnetModelItem",
"Img2ImgPageRequestSelectedModel",
@@ -357,9 +489,12 @@
"LetterWriterPageResponse",
"LetterWriterPageStatusResponse",
"LipsyncPageOutput",
+ "LipsyncPageRequest",
+ "LipsyncPageRequestSelectedModel",
"LipsyncPageResponse",
"LipsyncPageStatusResponse",
"LipsyncTtsPageOutput",
+ "LipsyncTtsPageRequest",
"LipsyncTtsPageRequestOpenaiTtsModel",
"LipsyncTtsPageRequestOpenaiVoiceName",
"LipsyncTtsPageRequestSelectedModel",
@@ -369,12 +504,14 @@
"LlmTools",
"MessagePart",
"ObjectInpaintingPageOutput",
+ "ObjectInpaintingPageRequest",
"ObjectInpaintingPageRequestSelectedModel",
"ObjectInpaintingPageResponse",
"ObjectInpaintingPageStatusResponse",
"PromptTreeNode",
"PromptTreeNodePrompt",
"QrCodeGeneratorPageOutput",
+ "QrCodeGeneratorPageRequest",
"QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
"QrCodeGeneratorPageRequestScheduler",
"QrCodeGeneratorPageRequestSelectedControlnetModelItem",
@@ -387,6 +524,7 @@
"RelatedDocSearchResponse",
"RelatedGoogleGptResponse",
"RelatedQnADocPageOutput",
+ "RelatedQnADocPageRequest",
"RelatedQnADocPageRequestCitationStyle",
"RelatedQnADocPageRequestEmbeddingModel",
"RelatedQnADocPageRequestKeywordQuery",
@@ -395,6 +533,7 @@
"RelatedQnADocPageResponse",
"RelatedQnADocPageStatusResponse",
"RelatedQnAPageOutput",
+ "RelatedQnAPageRequest",
"RelatedQnAPageRequestEmbeddingModel",
"RelatedQnAPageRequestResponseFormatType",
"RelatedQnAPageRequestSelectedModel",
@@ -411,6 +550,7 @@
"SadTalkerSettingsPreprocess",
"SearchReference",
"SeoSummaryPageOutput",
+ "SeoSummaryPageRequest",
"SeoSummaryPageRequestResponseFormatType",
"SeoSummaryPageRequestSelectedModel",
"SeoSummaryPageResponse",
@@ -418,18 +558,24 @@
"SerpSearchLocation",
"SerpSearchType",
"SmartGptPageOutput",
+ "SmartGptPageRequest",
+ "SmartGptPageRequestResponseFormatType",
+ "SmartGptPageRequestSelectedModel",
"SmartGptPageResponse",
"SmartGptPageStatusResponse",
"SocialLookupEmailPageOutput",
+ "SocialLookupEmailPageRequest",
"SocialLookupEmailPageRequestResponseFormatType",
"SocialLookupEmailPageRequestSelectedModel",
"SocialLookupEmailPageResponse",
"SocialLookupEmailPageStatusResponse",
"StreamError",
"Text2AudioPageOutput",
+ "Text2AudioPageRequest",
"Text2AudioPageResponse",
"Text2AudioPageStatusResponse",
"TextToSpeechPageOutput",
+ "TextToSpeechPageRequest",
"TextToSpeechPageRequestOpenaiTtsModel",
"TextToSpeechPageRequestOpenaiVoiceName",
"TextToSpeechPageRequestTtsProvider",
@@ -437,6 +583,7 @@
"TextToSpeechPageStatusResponse",
"TrainingDataModel",
"TranslationPageOutput",
+ "TranslationPageRequest",
"TranslationPageRequestSelectedModel",
"TranslationPageResponse",
"TranslationPageStatusResponse",
@@ -446,6 +593,17 @@
"VideoBotsPageOutput",
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
+ "VideoBotsPageRequest",
+ "VideoBotsPageRequestAsrModel",
+ "VideoBotsPageRequestCitationStyle",
+ "VideoBotsPageRequestEmbeddingModel",
+ "VideoBotsPageRequestLipsyncModel",
+ "VideoBotsPageRequestOpenaiTtsModel",
+ "VideoBotsPageRequestOpenaiVoiceName",
+ "VideoBotsPageRequestResponseFormatType",
+ "VideoBotsPageRequestSelectedModel",
+ "VideoBotsPageRequestTranslationModel",
+ "VideoBotsPageRequestTtsProvider",
"VideoBotsPageResponse",
"VideoBotsPageStatusResponse",
]
diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py
new file mode 100644
index 0000000..228b6ff
--- /dev/null
+++ b/src/gooey/types/asr_page_request.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .asr_page_request_output_format import AsrPageRequestOutputFormat
+from .asr_page_request_selected_model import AsrPageRequestSelectedModel
+from .asr_page_request_translation_model import AsrPageRequestTranslationModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class AsrPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ documents: typing.List[str]
+ selected_model: typing.Optional[AsrPageRequestSelectedModel] = None
+ language: typing.Optional[str] = None
+ translation_model: typing.Optional[AsrPageRequestTranslationModel] = None
+ output_format: typing.Optional[AsrPageRequestOutputFormat] = None
+ google_translate_target: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ use `translation_model` & `translation_target` instead.
+ """
+
+ translation_source: typing.Optional[str] = None
+ translation_target: typing.Optional[str] = None
+ glossary_document: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_art_qr_code.py b/src/gooey/types/body_async_form_art_qr_code.py
new file mode 100644
index 0000000..10f380c
--- /dev/null
+++ b/src/gooey/types/body_async_form_art_qr_code.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormArtQrCode(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_asr.py b/src/gooey/types/body_async_form_asr.py
new file mode 100644
index 0000000..ec63317
--- /dev/null
+++ b/src/gooey/types/body_async_form_asr.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormAsr(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_bulk_eval.py b/src/gooey/types/body_async_form_bulk_eval.py
new file mode 100644
index 0000000..4bce6fa
--- /dev/null
+++ b/src/gooey/types/body_async_form_bulk_eval.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormBulkEval(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_bulk_runner.py b/src/gooey/types/body_async_form_bulk_runner.py
new file mode 100644
index 0000000..1460309
--- /dev/null
+++ b/src/gooey/types/body_async_form_bulk_runner.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormBulkRunner(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_chyron_plant.py b/src/gooey/types/body_async_form_chyron_plant.py
new file mode 100644
index 0000000..21b2f9d
--- /dev/null
+++ b/src/gooey/types/body_async_form_chyron_plant.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormChyronPlant(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_compare_ai_upscalers.py b/src/gooey/types/body_async_form_compare_ai_upscalers.py
new file mode 100644
index 0000000..1379dcd
--- /dev/null
+++ b/src/gooey/types/body_async_form_compare_ai_upscalers.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormCompareAiUpscalers(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_compare_llm.py b/src/gooey/types/body_async_form_compare_llm.py
new file mode 100644
index 0000000..c9648d2
--- /dev/null
+++ b/src/gooey/types/body_async_form_compare_llm.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormCompareLlm(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_compare_text2img.py b/src/gooey/types/body_async_form_compare_text2img.py
new file mode 100644
index 0000000..c33c36e
--- /dev/null
+++ b/src/gooey/types/body_async_form_compare_text2img.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormCompareText2Img(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_deforum_sd.py b/src/gooey/types/body_async_form_deforum_sd.py
new file mode 100644
index 0000000..3bc9b38
--- /dev/null
+++ b/src/gooey/types/body_async_form_deforum_sd.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormDeforumSd(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_doc_extract.py b/src/gooey/types/body_async_form_doc_extract.py
new file mode 100644
index 0000000..ac7eb62
--- /dev/null
+++ b/src/gooey/types/body_async_form_doc_extract.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormDocExtract(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_doc_search.py b/src/gooey/types/body_async_form_doc_search.py
new file mode 100644
index 0000000..5f92368
--- /dev/null
+++ b/src/gooey/types/body_async_form_doc_search.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormDocSearch(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_doc_summary.py b/src/gooey/types/body_async_form_doc_summary.py
new file mode 100644
index 0000000..9464de3
--- /dev/null
+++ b/src/gooey/types/body_async_form_doc_summary.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormDocSummary(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_email_face_inpainting.py b/src/gooey/types/body_async_form_email_face_inpainting.py
new file mode 100644
index 0000000..73b8810
--- /dev/null
+++ b/src/gooey/types/body_async_form_email_face_inpainting.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormEmailFaceInpainting(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_embeddings.py b/src/gooey/types/body_async_form_embeddings.py
new file mode 100644
index 0000000..b2f780a
--- /dev/null
+++ b/src/gooey/types/body_async_form_embeddings.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormEmbeddings(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_face_inpainting.py b/src/gooey/types/body_async_form_face_inpainting.py
new file mode 100644
index 0000000..335f399
--- /dev/null
+++ b/src/gooey/types/body_async_form_face_inpainting.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormFaceInpainting(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_functions.py b/src/gooey/types/body_async_form_functions.py
new file mode 100644
index 0000000..c9fe013
--- /dev/null
+++ b/src/gooey/types/body_async_form_functions.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormFunctions(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_google_gpt.py b/src/gooey/types/body_async_form_google_gpt.py
new file mode 100644
index 0000000..20d2068
--- /dev/null
+++ b/src/gooey/types/body_async_form_google_gpt.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormGoogleGpt(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_google_image_gen.py b/src/gooey/types/body_async_form_google_image_gen.py
new file mode 100644
index 0000000..1ca013e
--- /dev/null
+++ b/src/gooey/types/body_async_form_google_image_gen.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormGoogleImageGen(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_image_segmentation.py b/src/gooey/types/body_async_form_image_segmentation.py
new file mode 100644
index 0000000..077a5a1
--- /dev/null
+++ b/src/gooey/types/body_async_form_image_segmentation.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormImageSegmentation(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_img2img.py b/src/gooey/types/body_async_form_img2img.py
new file mode 100644
index 0000000..ba1b683
--- /dev/null
+++ b/src/gooey/types/body_async_form_img2img.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormImg2Img(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_letter_writer.py b/src/gooey/types/body_async_form_letter_writer.py
new file mode 100644
index 0000000..c2e1d76
--- /dev/null
+++ b/src/gooey/types/body_async_form_letter_writer.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormLetterWriter(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_lipsync.py b/src/gooey/types/body_async_form_lipsync.py
new file mode 100644
index 0000000..a236a43
--- /dev/null
+++ b/src/gooey/types/body_async_form_lipsync.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormLipsync(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_lipsync_tts.py b/src/gooey/types/body_async_form_lipsync_tts.py
new file mode 100644
index 0000000..dc951af
--- /dev/null
+++ b/src/gooey/types/body_async_form_lipsync_tts.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormLipsyncTts(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_object_inpainting.py b/src/gooey/types/body_async_form_object_inpainting.py
new file mode 100644
index 0000000..ccfadef
--- /dev/null
+++ b/src/gooey/types/body_async_form_object_inpainting.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormObjectInpainting(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_related_qna_maker.py b/src/gooey/types/body_async_form_related_qna_maker.py
new file mode 100644
index 0000000..a59459f
--- /dev/null
+++ b/src/gooey/types/body_async_form_related_qna_maker.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormRelatedQnaMaker(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_related_qna_maker_doc.py b/src/gooey/types/body_async_form_related_qna_maker_doc.py
new file mode 100644
index 0000000..deb15bb
--- /dev/null
+++ b/src/gooey/types/body_async_form_related_qna_maker_doc.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormRelatedQnaMakerDoc(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_seo_summary.py b/src/gooey/types/body_async_form_seo_summary.py
new file mode 100644
index 0000000..6a074ee
--- /dev/null
+++ b/src/gooey/types/body_async_form_seo_summary.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormSeoSummary(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_smart_gpt.py b/src/gooey/types/body_async_form_smart_gpt.py
new file mode 100644
index 0000000..e2f29f7
--- /dev/null
+++ b/src/gooey/types/body_async_form_smart_gpt.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormSmartGpt(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_social_lookup_email.py b/src/gooey/types/body_async_form_social_lookup_email.py
new file mode 100644
index 0000000..ce1890c
--- /dev/null
+++ b/src/gooey/types/body_async_form_social_lookup_email.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormSocialLookupEmail(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_text2audio.py b/src/gooey/types/body_async_form_text2audio.py
new file mode 100644
index 0000000..c6f38ef
--- /dev/null
+++ b/src/gooey/types/body_async_form_text2audio.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormText2Audio(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_text_to_speech.py b/src/gooey/types/body_async_form_text_to_speech.py
new file mode 100644
index 0000000..6cbc13b
--- /dev/null
+++ b/src/gooey/types/body_async_form_text_to_speech.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormTextToSpeech(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_translate.py b/src/gooey/types/body_async_form_translate.py
new file mode 100644
index 0000000..5434bc1
--- /dev/null
+++ b/src/gooey/types/body_async_form_translate.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormTranslate(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_video_bots.py b/src/gooey/types/body_async_form_video_bots.py
new file mode 100644
index 0000000..706881d
--- /dev/null
+++ b/src/gooey/types/body_async_form_video_bots.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyAsyncFormVideoBots(UniversalBaseModel):
+ json_: str = pydantic.Field(alias="json")
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/bulk_eval_page_request.py b/src/gooey/types/bulk_eval_page_request.py
new file mode 100644
index 0000000..9981bd3
--- /dev/null
+++ b/src/gooey/types/bulk_eval_page_request.py
@@ -0,0 +1,57 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agg_function import AggFunction
+from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
+from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel
+from .eval_prompt import EvalPrompt
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class BulkEvalPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ documents: typing.List[str] = pydantic.Field()
+ """
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions or for Art QR Code, this would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+ """
+
+ eval_prompts: typing.Optional[typing.List[EvalPrompt]] = pydantic.Field(default=None)
+ """
+ Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
+ _The `columns` dictionary can be used to reference the spreadsheet columns._
+ """
+
+ agg_functions: typing.Optional[typing.List[AggFunction]] = pydantic.Field(default=None)
+ """
+ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+ """
+
+ selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py b/src/gooey/types/bulk_eval_page_request_response_format_type.py
similarity index 100%
rename from src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py
rename to src/gooey/types/bulk_eval_page_request_response_format_type.py
diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py b/src/gooey/types/bulk_eval_page_request_selected_model.py
similarity index 100%
rename from src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py
rename to src/gooey/types/bulk_eval_page_request_selected_model.py
diff --git a/src/gooey/types/bulk_runner_page_request.py b/src/gooey/types/bulk_runner_page_request.py
new file mode 100644
index 0000000..d785c72
--- /dev/null
+++ b/src/gooey/types/bulk_runner_page_request.py
@@ -0,0 +1,56 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class BulkRunnerPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ documents: typing.List[str] = pydantic.Field()
+ """
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions or for Art QR Code, this would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+ """
+
+ run_urls: typing.List[str] = pydantic.Field()
+ """
+ Provide one or more Gooey.AI workflow runs.
+ You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+ """
+
+ input_columns: typing.Dict[str, str] = pydantic.Field()
+ """
+ For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+ """
+
+ output_columns: typing.Dict[str, str] = pydantic.Field()
+ """
+ For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+ """
+
+ eval_urls: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_llm_page_request.py b/src/gooey/types/compare_llm_page_request.py
new file mode 100644
index 0000000..87ae925
--- /dev/null
+++ b/src/gooey/types/compare_llm_page_request.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
+from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class CompareLlmPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_prompt: typing.Optional[str] = None
+ selected_models: typing.Optional[typing.List[CompareLlmPageRequestSelectedModelsItem]] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_text2img_page_request.py b/src/gooey/types/compare_text2img_page_request.py
new file mode 100644
index 0000000..fbfeb11
--- /dev/null
+++ b/src/gooey/types/compare_text2img_page_request.py
@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler
+from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class CompareText2ImgPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ text_prompt: str
+ negative_prompt: typing.Optional[str] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ dall_e3quality: typing.Optional[str] = pydantic.Field(alias="dall_e_3_quality", default=None)
+ dall_e3style: typing.Optional[str] = pydantic.Field(alias="dall_e_3_style", default=None)
+ guidance_scale: typing.Optional[float] = None
+ seed: typing.Optional[int] = None
+ sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)
+ selected_models: typing.Optional[typing.List[CompareText2ImgPageRequestSelectedModelsItem]] = None
+ scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = None
+ edit_instruction: typing.Optional[str] = None
+ image_guidance_scale: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_upscaler_page_request.py b/src/gooey/types/compare_upscaler_page_request.py
new file mode 100644
index 0000000..00411a5
--- /dev/null
+++ b/src/gooey/types/compare_upscaler_page_request.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class CompareUpscalerPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Input Image
+ """
+
+ input_video: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Input Video
+ """
+
+ scale: int = pydantic.Field()
+ """
+ The final upsampling scale of the image
+ """
+
+ selected_models: typing.Optional[typing.List[CompareUpscalerPageRequestSelectedModelsItem]] = None
+ selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/deforum_sd_page_request.py b/src/gooey/types/deforum_sd_page_request.py
new file mode 100644
index 0000000..79f6d06
--- /dev/null
+++ b/src/gooey/types/deforum_sd_page_request.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .animation_prompt import AnimationPrompt
+from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class DeforumSdPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ animation_prompts: typing.List[AnimationPrompt]
+ max_frames: typing.Optional[int] = None
+ selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = None
+ animation_mode: typing.Optional[str] = None
+ zoom: typing.Optional[str] = None
+ translation_x: typing.Optional[str] = None
+ translation_y: typing.Optional[str] = None
+ rotation3d_x: typing.Optional[str] = pydantic.Field(alias="rotation_3d_x", default=None)
+ rotation3d_y: typing.Optional[str] = pydantic.Field(alias="rotation_3d_y", default=None)
+ rotation3d_z: typing.Optional[str] = pydantic.Field(alias="rotation_3d_z", default=None)
+ fps: typing.Optional[int] = None
+ seed: typing.Optional[int] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py
new file mode 100644
index 0000000..1942904
--- /dev/null
+++ b/src/gooey/types/doc_extract_page_request.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
+from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
+from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class DocExtractPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ documents: typing.List[str]
+ sheet_url: typing.Optional[str] = None
+ selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = None
+ google_translate_target: typing.Optional[str] = None
+ glossary_document: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+ """
+
+ task_instructions: typing.Optional[str] = None
+ selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_search_page_request.py b/src/gooey/types/doc_search_page_request.py
new file mode 100644
index 0000000..73d4d6e
--- /dev/null
+++ b/src/gooey/types/doc_search_page_request.py
@@ -0,0 +1,57 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
+from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel
+from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
+from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType
+from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class DocSearchPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ search_query: str
+ keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = None
+ documents: typing.Optional[typing.List[str]] = None
+ max_references: typing.Optional[int] = None
+ max_context_words: typing.Optional[int] = None
+ scroll_jump: typing.Optional[int] = None
+ doc_extract_url: typing.Optional[str] = None
+ embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = None
+ dense_weight: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+ """
+
+ task_instructions: typing.Optional[str] = None
+ query_instructions: typing.Optional[str] = None
+ selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = None
+ citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py
new file mode 100644
index 0000000..cb112fc
--- /dev/null
+++ b/src/gooey/types/doc_summary_page_request.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
+from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
+from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class DocSummaryPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ documents: typing.List[str]
+ task_instructions: typing.Optional[str] = None
+ merge_instructions: typing.Optional[str] = None
+ selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = None
+ chain_type: typing.Optional[typing.Literal["map_reduce"]] = None
+ selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = None
+ google_translate_target: typing.Optional[str] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/email_face_inpainting_page_request.py b/src/gooey/types/email_face_inpainting_page_request.py
new file mode 100644
index 0000000..07f4660
--- /dev/null
+++ b/src/gooey/types/email_face_inpainting_page_request.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class EmailFaceInpaintingPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ email_address: typing.Optional[str] = None
+ twitter_handle: typing.Optional[str] = None
+ text_prompt: str
+ face_scale: typing.Optional[float] = None
+ face_pos_x: typing.Optional[float] = None
+ face_pos_y: typing.Optional[float] = None
+ selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = None
+ negative_prompt: typing.Optional[str] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ upscale_factor: typing.Optional[float] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ should_send_email: typing.Optional[bool] = None
+ email_from: typing.Optional[str] = None
+ email_cc: typing.Optional[str] = None
+ email_bcc: typing.Optional[str] = None
+ email_subject: typing.Optional[str] = None
+ email_body: typing.Optional[str] = None
+ email_body_enable_html: typing.Optional[bool] = None
+ fallback_email_body: typing.Optional[str] = None
+ seed: typing.Optional[int] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/embeddings_page_request.py b/src/gooey/types/embeddings_page_request.py
new file mode 100644
index 0000000..9e67171
--- /dev/null
+++ b/src/gooey/types/embeddings_page_request.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class EmbeddingsPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ texts: typing.List[str]
+ selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/face_inpainting_page_request.py b/src/gooey/types/face_inpainting_page_request.py
new file mode 100644
index 0000000..868b53b
--- /dev/null
+++ b/src/gooey/types/face_inpainting_page_request.py
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class FaceInpaintingPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: str
+ text_prompt: str
+ face_scale: typing.Optional[float] = None
+ face_pos_x: typing.Optional[float] = None
+ face_pos_y: typing.Optional[float] = None
+ selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = None
+ negative_prompt: typing.Optional[str] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ upscale_factor: typing.Optional[float] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ seed: typing.Optional[int] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/functions_page_request.py b/src/gooey/types/functions_page_request.py
new file mode 100644
index 0000000..30406dd
--- /dev/null
+++ b/src/gooey/types/functions_page_request.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .run_settings import RunSettings
+
+
+class FunctionsPageRequest(UniversalBaseModel):
+ code: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The JS code to be executed.
+ """
+
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used in the code
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/google_gpt_page_request.py b/src/gooey/types/google_gpt_page_request.py
new file mode 100644
index 0000000..9def494
--- /dev/null
+++ b/src/gooey/types/google_gpt_page_request.py
@@ -0,0 +1,67 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel
+from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType
+from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .serp_search_location import SerpSearchLocation
+from .serp_search_type import SerpSearchType
+
+
+class GoogleGptPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ search_query: str
+ site_filter: str
+ task_instructions: typing.Optional[str] = None
+ query_instructions: typing.Optional[str] = None
+ selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = None
+ max_search_urls: typing.Optional[int] = None
+ max_references: typing.Optional[int] = None
+ max_context_words: typing.Optional[int] = None
+ scroll_jump: typing.Optional[int] = None
+ embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = None
+ dense_weight: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+ """
+
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = None
+ serp_search_location: typing.Optional[SerpSearchLocation] = None
+ scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_location` instead
+ """
+
+ serp_search_type: typing.Optional[SerpSearchType] = None
+ scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_type` instead
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/google_image_gen_page_request.py b/src/gooey/types/google_image_gen_page_request.py
new file mode 100644
index 0000000..8e1360b
--- /dev/null
+++ b/src/gooey/types/google_image_gen_page_request.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .serp_search_location import SerpSearchLocation
+
+
+class GoogleImageGenPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ serp_search_location: typing.Optional[SerpSearchLocation] = None
+ scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_location` instead
+ """
+
+ search_query: str
+ text_prompt: str
+ selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = None
+ negative_prompt: typing.Optional[str] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ prompt_strength: typing.Optional[float] = None
+ sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)
+ seed: typing.Optional[int] = None
+ image_guidance_scale: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/image_segmentation_page_request.py b/src/gooey/types/image_segmentation_page_request.py
new file mode 100644
index 0000000..9f2bc39
--- /dev/null
+++ b/src/gooey/types/image_segmentation_page_request.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class ImageSegmentationPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: str
+ selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = None
+ mask_threshold: typing.Optional[float] = None
+ rect_persepective_transform: typing.Optional[bool] = None
+ reflection_opacity: typing.Optional[float] = None
+ obj_scale: typing.Optional[float] = None
+ obj_pos_x: typing.Optional[float] = None
+ obj_pos_y: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/img2img_page_request.py b/src/gooey/types/img2img_page_request.py
new file mode 100644
index 0000000..818cecb
--- /dev/null
+++ b/src/gooey/types/img2img_page_request.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
+from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class Img2ImgPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: str
+ text_prompt: typing.Optional[str] = None
+ selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = None
+ selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = None
+ negative_prompt: typing.Optional[str] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ prompt_strength: typing.Optional[float] = None
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
+ seed: typing.Optional[int] = None
+ image_guidance_scale: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_page_request.py b/src/gooey/types/lipsync_page_request.py
new file mode 100644
index 0000000..89840ab
--- /dev/null
+++ b/src/gooey/types/lipsync_page_request.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .sad_talker_settings import SadTalkerSettings
+
+
+class LipsyncPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_face: typing.Optional[str] = None
+ face_padding_top: typing.Optional[int] = None
+ face_padding_bottom: typing.Optional[int] = None
+ face_padding_left: typing.Optional[int] = None
+ face_padding_right: typing.Optional[int] = None
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None
+ selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = None
+ input_audio: typing.Optional[str] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/lip_syncing/types/lipsync_page_request_selected_model.py b/src/gooey/types/lipsync_page_request_selected_model.py
similarity index 100%
rename from src/gooey/lip_syncing/types/lipsync_page_request_selected_model.py
rename to src/gooey/types/lipsync_page_request_selected_model.py
diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py
new file mode 100644
index 0000000..31cdcd5
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_page_request.py
@@ -0,0 +1,63 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
+from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
+from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
+from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .sad_talker_settings import SadTalkerSettings
+
+
+class LipsyncTtsPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ text_prompt: str
+ tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = None
+ uberduck_voice_name: typing.Optional[str] = None
+ uberduck_speaking_rate: typing.Optional[float] = None
+ google_voice_name: typing.Optional[str] = None
+ google_speaking_rate: typing.Optional[float] = None
+ google_pitch: typing.Optional[float] = None
+ bark_history_prompt: typing.Optional[str] = None
+ elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    DEPRECATED: use `elevenlabs_voice_id` instead
+ """
+
+ elevenlabs_api_key: typing.Optional[str] = None
+ elevenlabs_voice_id: typing.Optional[str] = None
+ elevenlabs_model: typing.Optional[str] = None
+ elevenlabs_stability: typing.Optional[float] = None
+ elevenlabs_similarity_boost: typing.Optional[float] = None
+ elevenlabs_style: typing.Optional[float] = None
+ elevenlabs_speaker_boost: typing.Optional[bool] = None
+ azure_voice_name: typing.Optional[str] = None
+ openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = None
+ openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = None
+ input_face: typing.Optional[str] = None
+ face_padding_top: typing.Optional[int] = None
+ face_padding_bottom: typing.Optional[int] = None
+ face_padding_left: typing.Optional[int] = None
+ face_padding_right: typing.Optional[int] = None
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None
+ selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/object_inpainting_page_request.py b/src/gooey/types/object_inpainting_page_request.py
new file mode 100644
index 0000000..3b1cbc5
--- /dev/null
+++ b/src/gooey/types/object_inpainting_page_request.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class ObjectInpaintingPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: str
+ text_prompt: str
+ obj_scale: typing.Optional[float] = None
+ obj_pos_x: typing.Optional[float] = None
+ obj_pos_y: typing.Optional[float] = None
+ mask_threshold: typing.Optional[float] = None
+ selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = None
+ negative_prompt: typing.Optional[str] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)
+ seed: typing.Optional[int] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/qr_code_generator_page_request.py b/src/gooey/types/qr_code_generator_page_request.py
new file mode 100644
index 0000000..6ebb5c4
--- /dev/null
+++ b/src/gooey/types/qr_code_generator_page_request.py
@@ -0,0 +1,67 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
+ QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
+)
+from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler
+from .qr_code_generator_page_request_selected_controlnet_model_item import (
+ QrCodeGeneratorPageRequestSelectedControlnetModelItem,
+)
+from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .vcard import Vcard
+
+
+class QrCodeGeneratorPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ qr_code_data: typing.Optional[str] = None
+ qr_code_input_image: typing.Optional[str] = None
+ qr_code_vcard: typing.Optional[Vcard] = None
+ qr_code_file: typing.Optional[str] = None
+ use_url_shortener: typing.Optional[bool] = None
+ text_prompt: str
+ negative_prompt: typing.Optional[str] = None
+ image_prompt: typing.Optional[str] = None
+ image_prompt_controlnet_models: typing.Optional[
+ typing.List[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+ ] = None
+ image_prompt_strength: typing.Optional[float] = None
+ image_prompt_scale: typing.Optional[float] = None
+ image_prompt_pos_x: typing.Optional[float] = None
+ image_prompt_pos_y: typing.Optional[float] = None
+ selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = None
+ selected_controlnet_model: typing.Optional[
+ typing.List[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+ ] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = None
+ seed: typing.Optional[int] = None
+ obj_scale: typing.Optional[float] = None
+ obj_pos_x: typing.Optional[float] = None
+ obj_pos_y: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/related_qn_a_doc_page_request.py b/src/gooey/types/related_qn_a_doc_page_request.py
new file mode 100644
index 0000000..b898b4f
--- /dev/null
+++ b/src/gooey/types/related_qn_a_doc_page_request.py
@@ -0,0 +1,71 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .recipe_function import RecipeFunction
+from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
+from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel
+from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
+from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType
+from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel
+from .run_settings import RunSettings
+from .serp_search_location import SerpSearchLocation
+from .serp_search_type import SerpSearchType
+
+
+class RelatedQnADocPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ search_query: str
+ keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = None
+ documents: typing.Optional[typing.List[str]] = None
+ max_references: typing.Optional[int] = None
+ max_context_words: typing.Optional[int] = None
+ scroll_jump: typing.Optional[int] = None
+ doc_extract_url: typing.Optional[str] = None
+ embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = None
+ dense_weight: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+ """
+
+ task_instructions: typing.Optional[str] = None
+ query_instructions: typing.Optional[str] = None
+ selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = None
+ citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = None
+ serp_search_location: typing.Optional[SerpSearchLocation] = None
+ scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_location` instead
+ """
+
+ serp_search_type: typing.Optional[SerpSearchType] = None
+ scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_type` instead
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/related_qn_a_page_request.py b/src/gooey/types/related_qn_a_page_request.py
new file mode 100644
index 0000000..3491f18
--- /dev/null
+++ b/src/gooey/types/related_qn_a_page_request.py
@@ -0,0 +1,67 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .recipe_function import RecipeFunction
+from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
+from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
+from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel
+from .run_settings import RunSettings
+from .serp_search_location import SerpSearchLocation
+from .serp_search_type import SerpSearchType
+
+
+class RelatedQnAPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ search_query: str
+ site_filter: str
+ task_instructions: typing.Optional[str] = None
+ query_instructions: typing.Optional[str] = None
+ selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = None
+ max_search_urls: typing.Optional[int] = None
+ max_references: typing.Optional[int] = None
+ max_context_words: typing.Optional[int] = None
+ scroll_jump: typing.Optional[int] = None
+ embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = None
+ dense_weight: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+ """
+
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = None
+ serp_search_location: typing.Optional[SerpSearchLocation] = None
+ scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_location` instead
+ """
+
+ serp_search_type: typing.Optional[SerpSearchType] = None
+ scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_type` instead
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/seo_summary_page_request.py b/src/gooey/types/seo_summary_page_request.py
new file mode 100644
index 0000000..12121af
--- /dev/null
+++ b/src/gooey/types/seo_summary_page_request.py
@@ -0,0 +1,53 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .run_settings import RunSettings
+from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType
+from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel
+from .serp_search_location import SerpSearchLocation
+from .serp_search_type import SerpSearchType
+
+
+class SeoSummaryPageRequest(UniversalBaseModel):
+ search_query: str
+ keywords: str
+ title: str
+ company_url: str
+ task_instructions: typing.Optional[str] = None
+ enable_html: typing.Optional[bool] = None
+ selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = None
+ max_search_urls: typing.Optional[int] = None
+ enable_crosslinks: typing.Optional[bool] = None
+ seed: typing.Optional[int] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = None
+ serp_search_location: typing.Optional[SerpSearchLocation] = None
+ scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_location` instead
+ """
+
+ serp_search_type: typing.Optional[SerpSearchType] = None
+ scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_type` instead
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/smart_gpt_page_request.py b/src/gooey/types/smart_gpt_page_request.py
new file mode 100644
index 0000000..ceedad9
--- /dev/null
+++ b/src/gooey/types/smart_gpt_page_request.py
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
+from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
+
+
+class SmartGptPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_prompt: str
+ cot_prompt: typing.Optional[str] = None
+ reflexion_prompt: typing.Optional[str] = None
+ dera_prompt: typing.Optional[str] = None
+ selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py b/src/gooey/types/smart_gpt_page_request_response_format_type.py
similarity index 100%
rename from src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py
rename to src/gooey/types/smart_gpt_page_request_response_format_type.py
diff --git a/src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py b/src/gooey/types/smart_gpt_page_request_selected_model.py
similarity index 100%
rename from src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py
rename to src/gooey/types/smart_gpt_page_request_selected_model.py
diff --git a/src/gooey/types/social_lookup_email_page_request.py b/src/gooey/types/social_lookup_email_page_request.py
new file mode 100644
index 0000000..39bcef3
--- /dev/null
+++ b/src/gooey/types/social_lookup_email_page_request.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
+from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel
+
+
+class SocialLookupEmailPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ email_address: str
+ input_prompt: typing.Optional[str] = None
+ selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/text2audio_page_request.py b/src/gooey/types/text2audio_page_request.py
new file mode 100644
index 0000000..f549c7e
--- /dev/null
+++ b/src/gooey/types/text2audio_page_request.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class Text2AudioPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ text_prompt: str
+ negative_prompt: typing.Optional[str] = None
+ duration_sec: typing.Optional[float] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ seed: typing.Optional[int] = None
+ sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)
+ selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/text_to_speech_page_request.py b/src/gooey/types/text_to_speech_page_request.py
new file mode 100644
index 0000000..bdd5d95
--- /dev/null
+++ b/src/gooey/types/text_to_speech_page_request.py
@@ -0,0 +1,54 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
+from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
+from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
+
+
+class TextToSpeechPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ text_prompt: str
+ tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = None
+ uberduck_voice_name: typing.Optional[str] = None
+ uberduck_speaking_rate: typing.Optional[float] = None
+ google_voice_name: typing.Optional[str] = None
+ google_speaking_rate: typing.Optional[float] = None
+ google_pitch: typing.Optional[float] = None
+ bark_history_prompt: typing.Optional[str] = None
+ elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    DEPRECATED: use `elevenlabs_voice_id` instead
+ """
+
+ elevenlabs_api_key: typing.Optional[str] = None
+ elevenlabs_voice_id: typing.Optional[str] = None
+ elevenlabs_model: typing.Optional[str] = None
+ elevenlabs_stability: typing.Optional[float] = None
+ elevenlabs_similarity_boost: typing.Optional[float] = None
+ elevenlabs_style: typing.Optional[float] = None
+ elevenlabs_speaker_boost: typing.Optional[bool] = None
+ azure_voice_name: typing.Optional[str] = None
+ openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = None
+ openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/translation_page_request.py b/src/gooey/types/translation_page_request.py
new file mode 100644
index 0000000..2c0f394
--- /dev/null
+++ b/src/gooey/types/translation_page_request.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
+
+
+class TranslationPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ texts: typing.Optional[typing.List[str]] = None
+ selected_model: typing.Optional[TranslationPageRequestSelectedModel] = None
+ translation_source: typing.Optional[str] = None
+ translation_target: typing.Optional[str] = None
+ glossary_document: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py
new file mode 100644
index 0000000..f6824e8
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request.py
@@ -0,0 +1,140 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .conversation_entry import ConversationEntry
+from .llm_tools import LlmTools
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .sad_talker_settings import SadTalkerSettings
+from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
+from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
+from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
+from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
+from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
+from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
+from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel
+from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
+from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
+
+
+class VideoBotsPageRequest(UniversalBaseModel):
+    functions: typing.Optional[typing.List[RecipeFunction]] = None  # recipe functions to run as part of this request
+    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+    """
+    Variables to be used as Jinja prompt templates and in functions as arguments
+    """
+
+    input_prompt: typing.Optional[str] = None  # user's text message to the copilot
+    input_audio: typing.Optional[str] = None  # presumably a URL to an audio file — TODO confirm
+    input_images: typing.Optional[typing.List[str]] = None  # presumably URLs to image files — TODO confirm
+    input_documents: typing.Optional[typing.List[str]] = None  # presumably URLs to document files — TODO confirm
+    doc_extract_url: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Select a workflow to extract text from documents and images.
+    """
+
+    messages: typing.Optional[typing.List[ConversationEntry]] = None  # prior conversation history
+    bot_script: typing.Optional[str] = None  # system prompt / script driving the bot's persona
+    selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = None  # which LLM to use
+    document_model: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+    """
+
+    task_instructions: typing.Optional[str] = None
+    query_instructions: typing.Optional[str] = None
+    keyword_instructions: typing.Optional[str] = None
+    documents: typing.Optional[typing.List[str]] = None  # knowledge-base documents to search over
+    max_references: typing.Optional[int] = None
+    max_context_words: typing.Optional[int] = None
+    scroll_jump: typing.Optional[int] = None
+    embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = None  # embedding model for document search
+    dense_weight: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+    Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+    """
+
+    citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None  # how to format citations in responses
+    use_url_shortener: typing.Optional[bool] = None
+    asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = pydantic.Field(default=None)
+    """
+    Choose a model to transcribe incoming audio messages to text.
+    """
+
+    asr_language: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Choose a language to transcribe incoming audio messages to text.
+    """
+
+    translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = None
+    user_language: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+    """
+
+    input_glossary_document: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Translation Glossary for User Language -> LLM Language (English)
+    """
+
+    output_glossary_document: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Translation Glossary for LLM Language (English) -> User Language
+    """
+
+    lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = None  # model used to lip-sync the output video
+    tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None)
+    """
+    Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+    """
+
+    avoid_repetition: typing.Optional[bool] = None  # LLM sampling controls follow
+    num_outputs: typing.Optional[int] = None
+    quality: typing.Optional[float] = None
+    max_tokens: typing.Optional[int] = None
+    sampling_temperature: typing.Optional[float] = None
+    response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = None
+    tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = None  # text-to-speech provider selection
+    uberduck_voice_name: typing.Optional[str] = None  # provider-specific TTS options follow
+    uberduck_speaking_rate: typing.Optional[float] = None
+    google_voice_name: typing.Optional[str] = None
+    google_speaking_rate: typing.Optional[float] = None
+    google_pitch: typing.Optional[float] = None
+    bark_history_prompt: typing.Optional[str] = None
+    elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Use `elevenlabs_voice_id` instead
+    """
+
+    elevenlabs_api_key: typing.Optional[str] = None
+    elevenlabs_voice_id: typing.Optional[str] = None
+    elevenlabs_model: typing.Optional[str] = None
+    elevenlabs_stability: typing.Optional[float] = None
+    elevenlabs_similarity_boost: typing.Optional[float] = None
+    elevenlabs_style: typing.Optional[float] = None
+    elevenlabs_speaker_boost: typing.Optional[bool] = None
+    azure_voice_name: typing.Optional[str] = None
+    openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = None
+    openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = None
+    input_face: typing.Optional[str] = None  # lip-sync video options follow; presumably a URL to a face image/video — TODO confirm
+    face_padding_top: typing.Optional[int] = None
+    face_padding_bottom: typing.Optional[int] = None
+    face_padding_left: typing.Optional[int] = None
+    face_padding_right: typing.Optional[int] = None
+    sadtalker_settings: typing.Optional[SadTalkerSettings] = None
+    settings: typing.Optional[RunSettings] = None  # run-level settings for this recipe invocation
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:  # Pydantic v1 equivalent of the ConfigDict above
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_asr_model.py b/src/gooey/types/video_bots_page_request_asr_model.py
similarity index 100%
rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_asr_model.py
rename to src/gooey/types/video_bots_page_request_asr_model.py
diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_citation_style.py b/src/gooey/types/video_bots_page_request_citation_style.py
similarity index 100%
rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_citation_style.py
rename to src/gooey/types/video_bots_page_request_citation_style.py
diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_embedding_model.py b/src/gooey/types/video_bots_page_request_embedding_model.py
similarity index 100%
rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_embedding_model.py
rename to src/gooey/types/video_bots_page_request_embedding_model.py
diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_lipsync_model.py b/src/gooey/types/video_bots_page_request_lipsync_model.py
similarity index 100%
rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_lipsync_model.py
rename to src/gooey/types/video_bots_page_request_lipsync_model.py
diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_tts_model.py b/src/gooey/types/video_bots_page_request_openai_tts_model.py
similarity index 100%
rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_tts_model.py
rename to src/gooey/types/video_bots_page_request_openai_tts_model.py
diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_voice_name.py b/src/gooey/types/video_bots_page_request_openai_voice_name.py
similarity index 100%
rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_voice_name.py
rename to src/gooey/types/video_bots_page_request_openai_voice_name.py
diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py b/src/gooey/types/video_bots_page_request_response_format_type.py
similarity index 100%
rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py
rename to src/gooey/types/video_bots_page_request_response_format_type.py
diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py b/src/gooey/types/video_bots_page_request_selected_model.py
similarity index 100%
rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py
rename to src/gooey/types/video_bots_page_request_selected_model.py
diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_translation_model.py b/src/gooey/types/video_bots_page_request_translation_model.py
similarity index 100%
rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_translation_model.py
rename to src/gooey/types/video_bots_page_request_translation_model.py
diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_tts_provider.py b/src/gooey/types/video_bots_page_request_tts_provider.py
similarity index 100%
rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_tts_provider.py
rename to src/gooey/types/video_bots_page_request_tts_provider.py