From c4e851b68264e1a0d9328b57073d0fb19b3c9d5c Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:11:22 +0000 Subject: [PATCH] Release 0.0.1-beta3 --- README.md | 30 +- poetry.lock | 27 +- pyproject.toml | 2 +- reference.md | 10901 +++------------- src/gooey/__init__.py | 165 +- src/gooey/ai_animation_generator/__init__.py | 2 - src/gooey/ai_animation_generator/client.py | 658 - src/gooey/ai_art_qr_code/__init__.py | 2 - src/gooey/ai_art_qr_code/client.py | 885 -- src/gooey/ai_background_changer/__init__.py | 2 - src/gooey/ai_background_changer/client.py | 573 - .../__init__.py | 2 - .../client.py | 817 -- src/gooey/ai_image_with_a_face/__init__.py | 2 - src/gooey/ai_image_with_a_face/client.py | 673 - src/gooey/bulk_runner/client.py | 500 +- src/gooey/chyron_plant_bot/__init__.py | 2 - src/gooey/chyron_plant_bot/client.py | 492 - src/gooey/client.py | 7428 ++++++++++- .../compare_ai_image_generators/__init__.py | 2 - .../compare_ai_image_generators/client.py | 686 - .../compare_ai_image_upscalers/__init__.py | 2 - .../compare_ai_image_upscalers/client.py | 537 - src/gooey/compare_ai_translations/__init__.py | 2 - src/gooey/compare_ai_translations/client.py | 525 - .../compare_ai_voice_generators/__init__.py | 2 - .../compare_ai_voice_generators/client.py | 755 -- .../copilot_for_your_enterprise/__init__.py | 25 + .../copilot_for_your_enterprise/client.py | 901 +- .../types/__init__.py | 25 + .../video_bots_page_request_asr_model.py | 3 +- .../video_bots_page_request_citation_style.py | 0 ...video_bots_page_request_embedding_model.py | 0 .../video_bots_page_request_lipsync_model.py | 0 ...ideo_bots_page_request_openai_tts_model.py | 0 ...deo_bots_page_request_openai_voice_name.py | 0 ..._bots_page_request_response_format_type.py | 5 + .../video_bots_page_request_selected_model.py | 8 + ...deo_bots_page_request_translation_model.py | 0 .../video_bots_page_request_tts_provider.py | 0 
src/gooey/copilot_integrations/__init__.py | 2 + src/gooey/copilot_integrations/client.py | 101 +- .../copilot_integrations/types/__init__.py | 2 + .../types/create_stream_request_asr_model.py | 3 +- ...ate_stream_request_response_format_type.py | 5 + .../create_stream_request_selected_model.py | 8 + src/gooey/core/client_wrapper.py | 12 +- .../__init__.py | 2 - .../client.py | 734 -- .../edit_an_image_with_ai_prompt/__init__.py | 2 - .../edit_an_image_with_ai_prompt/client.py | 670 - src/gooey/embeddings/client.py | 409 +- src/gooey/errors/__init__.py | 3 +- src/gooey/errors/internal_server_error.py | 9 - src/gooey/evaluator/__init__.py | 3 + src/gooey/evaluator/client.py | 413 +- src/gooey/evaluator/types/__init__.py | 6 + ..._eval_page_request_response_format_type.py | 5 + .../bulk_eval_page_request_selected_model.py | 8 + src/gooey/functions/client.py | 230 +- .../__init__.py | 2 - .../client.py | 788 -- .../__init__.py | 2 - .../client.py | 689 - .../large_language_models_gpt3/__init__.py | 2 - .../large_language_models_gpt3/client.py | 566 - src/gooey/letter_writer/__init__.py | 2 - src/gooey/letter_writer/client.py | 669 - src/gooey/lip_syncing/__init__.py | 3 + src/gooey/lip_syncing/client.py | 352 +- src/gooey/lip_syncing/types/__init__.py | 5 + .../lipsync_page_request_selected_model.py | 0 .../lipsync_video_with_any_text/__init__.py | 2 - .../lipsync_video_with_any_text/client.py | 869 -- src/gooey/misc/client.py | 82 - .../__init__.py | 2 - .../client.py | 818 -- .../__init__.py | 2 - .../client.py | 573 - .../__init__.py | 2 - .../client.py | 662 - .../search_your_docs_with_gpt/__init__.py | 2 - src/gooey/search_your_docs_with_gpt/client.py | 744 -- src/gooey/smart_gpt/__init__.py | 3 + src/gooey/smart_gpt/client.py | 331 +- src/gooey/smart_gpt/types/__init__.py | 6 + ...t_gpt_page_request_response_format_type.py | 5 + .../smart_gpt_page_request_selected_model.py | 8 + .../__init__.py | 2 - .../speech_recognition_translation/client.py | 603 - 
.../summarize_your_docs_with_gpt/__init__.py | 2 - .../summarize_your_docs_with_gpt/client.py | 638 - .../__init__.py | 2 - .../client.py | 646 - .../text_guided_audio_generator/__init__.py | 2 - .../text_guided_audio_generator/client.py | 588 - src/gooey/types/__init__.py | 102 +- src/gooey/types/asr_page_request.py | 49 - .../types/asr_page_request_selected_model.py | 3 +- src/gooey/types/asr_page_status_response.py | 2 +- src/gooey/types/bulk_eval_page_request.py | 55 - .../types/bulk_eval_page_status_response.py | 2 +- src/gooey/types/bulk_runner_page_request.py | 56 - .../types/bulk_runner_page_status_response.py | 2 +- .../chyron_plant_page_status_response.py | 2 +- src/gooey/types/compare_llm_page_request.py | 38 - ...e_llm_page_request_selected_models_item.py | 8 + .../types/compare_llm_page_status_response.py | 2 +- .../types/compare_text2img_page_request.py | 45 - .../compare_text2img_page_status_response.py | 2 +- .../types/compare_upscaler_page_request.py | 46 - .../compare_upscaler_page_status_response.py | 2 +- src/gooey/types/deforum_sd_page_request.py | 42 - .../types/deforum_sd_page_status_response.py | 2 +- src/gooey/types/doc_extract_page_output.py | 1 + src/gooey/types/doc_extract_page_request.py | 47 - ...tract_page_request_response_format_type.py | 5 + ...extract_page_request_selected_asr_model.py | 3 +- ...doc_extract_page_request_selected_model.py | 8 + .../types/doc_extract_page_status_response.py | 2 +- src/gooey/types/doc_search_page_request.py | 55 - ...earch_page_request_response_format_type.py | 5 + .../doc_search_page_request_selected_model.py | 8 + .../types/doc_search_page_status_response.py | 2 +- src/gooey/types/doc_summary_page_request.py | 42 - ...mmary_page_request_response_format_type.py | 5 + ...summary_page_request_selected_asr_model.py | 3 +- ...doc_summary_page_request_selected_model.py | 8 + .../types/doc_summary_page_status_response.py | 2 +- .../email_face_inpainting_page_request.py | 52 - 
...il_face_inpainting_page_status_response.py | 2 +- src/gooey/types/embeddings_page_request.py | 31 - .../types/embeddings_page_status_response.py | 2 +- .../types/face_inpainting_page_request.py | 43 - .../face_inpainting_page_status_response.py | 2 +- src/gooey/types/final_response.py | 2 +- src/gooey/types/functions_page_request.py | 31 - .../types/functions_page_status_response.py | 2 +- src/gooey/types/google_gpt_page_request.py | 65 - ...e_gpt_page_request_response_format_type.py | 5 + .../google_gpt_page_request_selected_model.py | 8 + .../types/google_gpt_page_status_response.py | 2 +- .../types/google_image_gen_page_request.py | 47 - .../google_image_gen_page_status_response.py | 2 +- .../types/image_segmentation_page_request.py | 37 - ...image_segmentation_page_status_response.py | 2 +- src/gooey/types/img2img_page_request.py | 44 - .../types/img2img_page_status_response.py | 2 +- .../letter_writer_page_status_response.py | 2 +- src/gooey/types/lipsync_page_request.py | 38 - .../types/lipsync_page_status_response.py | 2 +- src/gooey/types/lipsync_tts_page_request.py | 63 - .../types/lipsync_tts_page_status_response.py | 2 +- .../types/object_inpainting_page_request.py | 44 - .../object_inpainting_page_status_response.py | 2 +- .../types/qr_code_generator_page_request.py | 67 - .../qr_code_generator_page_status_response.py | 2 +- .../types/related_qn_a_doc_page_request.py | 69 - ...a_doc_page_request_response_format_type.py | 5 + ...ed_qn_a_doc_page_request_selected_model.py | 8 + .../related_qn_a_doc_page_status_response.py | 2 +- src/gooey/types/related_qn_a_page_request.py | 65 - ..._qn_a_page_request_response_format_type.py | 5 + ...elated_qn_a_page_request_selected_model.py | 8 + .../related_qn_a_page_status_response.py | 2 +- src/gooey/types/seo_summary_page_request.py | 51 - ...mmary_page_request_response_format_type.py | 5 + ...seo_summary_page_request_selected_model.py | 8 + .../types/seo_summary_page_status_response.py | 2 +- 
src/gooey/types/serp_search_location.py | 2 +- src/gooey/types/smart_gpt_page_request.py | 39 - .../types/smart_gpt_page_status_response.py | 2 +- .../types/social_lookup_email_page_request.py | 37 - ...email_page_request_response_format_type.py | 5 + ...ookup_email_page_request_selected_model.py | 8 + ...ocial_lookup_email_page_status_response.py | 2 +- src/gooey/types/text2audio_page_request.py | 37 - .../types/text2audio_page_status_response.py | 2 +- .../types/text_to_speech_page_request.py | 54 - .../text_to_speech_page_status_response.py | 2 +- src/gooey/types/translation_page_request.py | 39 - .../types/translation_page_status_response.py | 2 +- src/gooey/types/video_bots_page_request.py | 138 - .../types/video_bots_page_status_response.py | 2 +- src/gooey/web_search_gpt3/__init__.py | 2 - src/gooey/web_search_gpt3/client.py | 788 -- 186 files changed, 9457 insertions(+), 32135 deletions(-) delete mode 100644 src/gooey/ai_animation_generator/__init__.py delete mode 100644 src/gooey/ai_animation_generator/client.py delete mode 100644 src/gooey/ai_art_qr_code/__init__.py delete mode 100644 src/gooey/ai_art_qr_code/client.py delete mode 100644 src/gooey/ai_background_changer/__init__.py delete mode 100644 src/gooey/ai_background_changer/client.py delete mode 100644 src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py delete mode 100644 src/gooey/ai_generated_photo_from_email_profile_lookup/client.py delete mode 100644 src/gooey/ai_image_with_a_face/__init__.py delete mode 100644 src/gooey/ai_image_with_a_face/client.py delete mode 100644 src/gooey/chyron_plant_bot/__init__.py delete mode 100644 src/gooey/chyron_plant_bot/client.py delete mode 100644 src/gooey/compare_ai_image_generators/__init__.py delete mode 100644 src/gooey/compare_ai_image_generators/client.py delete mode 100644 src/gooey/compare_ai_image_upscalers/__init__.py delete mode 100644 src/gooey/compare_ai_image_upscalers/client.py delete mode 100644 
src/gooey/compare_ai_translations/__init__.py delete mode 100644 src/gooey/compare_ai_translations/client.py delete mode 100644 src/gooey/compare_ai_voice_generators/__init__.py delete mode 100644 src/gooey/compare_ai_voice_generators/client.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/__init__.py rename src/gooey/{ => copilot_for_your_enterprise}/types/video_bots_page_request_asr_model.py (94%) rename src/gooey/{ => copilot_for_your_enterprise}/types/video_bots_page_request_citation_style.py (100%) rename src/gooey/{ => copilot_for_your_enterprise}/types/video_bots_page_request_embedding_model.py (100%) rename src/gooey/{ => copilot_for_your_enterprise}/types/video_bots_page_request_lipsync_model.py (100%) rename src/gooey/{ => copilot_for_your_enterprise}/types/video_bots_page_request_openai_tts_model.py (100%) rename src/gooey/{ => copilot_for_your_enterprise}/types/video_bots_page_request_openai_voice_name.py (100%) create mode 100644 src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py rename src/gooey/{ => copilot_for_your_enterprise}/types/video_bots_page_request_selected_model.py (79%) rename src/gooey/{ => copilot_for_your_enterprise}/types/video_bots_page_request_translation_model.py (100%) rename src/gooey/{ => copilot_for_your_enterprise}/types/video_bots_page_request_tts_provider.py (100%) create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py delete mode 100644 src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py delete mode 100644 src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py delete mode 100644 src/gooey/edit_an_image_with_ai_prompt/__init__.py delete mode 100644 src/gooey/edit_an_image_with_ai_prompt/client.py delete mode 100644 src/gooey/errors/internal_server_error.py create mode 100644 src/gooey/evaluator/types/__init__.py create mode 100644 
src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py rename src/gooey/{ => evaluator}/types/bulk_eval_page_request_selected_model.py (79%) delete mode 100644 src/gooey/generate_people_also_ask_seo_content/__init__.py delete mode 100644 src/gooey/generate_people_also_ask_seo_content/client.py delete mode 100644 src/gooey/generate_product_photo_backgrounds/__init__.py delete mode 100644 src/gooey/generate_product_photo_backgrounds/client.py delete mode 100644 src/gooey/large_language_models_gpt3/__init__.py delete mode 100644 src/gooey/large_language_models_gpt3/client.py delete mode 100644 src/gooey/letter_writer/__init__.py delete mode 100644 src/gooey/letter_writer/client.py create mode 100644 src/gooey/lip_syncing/types/__init__.py rename src/gooey/{ => lip_syncing}/types/lipsync_page_request_selected_model.py (100%) delete mode 100644 src/gooey/lipsync_video_with_any_text/__init__.py delete mode 100644 src/gooey/lipsync_video_with_any_text/client.py delete mode 100644 src/gooey/people_also_ask_answers_from_a_doc/__init__.py delete mode 100644 src/gooey/people_also_ask_answers_from_a_doc/client.py delete mode 100644 src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py delete mode 100644 src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py delete mode 100644 src/gooey/render_image_search_results_with_ai/__init__.py delete mode 100644 src/gooey/render_image_search_results_with_ai/client.py delete mode 100644 src/gooey/search_your_docs_with_gpt/__init__.py delete mode 100644 src/gooey/search_your_docs_with_gpt/client.py create mode 100644 src/gooey/smart_gpt/types/__init__.py create mode 100644 src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py rename src/gooey/{ => smart_gpt}/types/smart_gpt_page_request_selected_model.py (79%) delete mode 100644 src/gooey/speech_recognition_translation/__init__.py delete mode 100644 src/gooey/speech_recognition_translation/client.py delete mode 100644 
src/gooey/summarize_your_docs_with_gpt/__init__.py delete mode 100644 src/gooey/summarize_your_docs_with_gpt/client.py delete mode 100644 src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py delete mode 100644 src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py delete mode 100644 src/gooey/text_guided_audio_generator/__init__.py delete mode 100644 src/gooey/text_guided_audio_generator/client.py delete mode 100644 src/gooey/types/asr_page_request.py delete mode 100644 src/gooey/types/bulk_eval_page_request.py delete mode 100644 src/gooey/types/bulk_runner_page_request.py delete mode 100644 src/gooey/types/compare_llm_page_request.py delete mode 100644 src/gooey/types/compare_text2img_page_request.py delete mode 100644 src/gooey/types/compare_upscaler_page_request.py delete mode 100644 src/gooey/types/deforum_sd_page_request.py delete mode 100644 src/gooey/types/doc_extract_page_request.py create mode 100644 src/gooey/types/doc_extract_page_request_response_format_type.py delete mode 100644 src/gooey/types/doc_search_page_request.py create mode 100644 src/gooey/types/doc_search_page_request_response_format_type.py delete mode 100644 src/gooey/types/doc_summary_page_request.py create mode 100644 src/gooey/types/doc_summary_page_request_response_format_type.py delete mode 100644 src/gooey/types/email_face_inpainting_page_request.py delete mode 100644 src/gooey/types/embeddings_page_request.py delete mode 100644 src/gooey/types/face_inpainting_page_request.py delete mode 100644 src/gooey/types/functions_page_request.py delete mode 100644 src/gooey/types/google_gpt_page_request.py create mode 100644 src/gooey/types/google_gpt_page_request_response_format_type.py delete mode 100644 src/gooey/types/google_image_gen_page_request.py delete mode 100644 src/gooey/types/image_segmentation_page_request.py delete mode 100644 src/gooey/types/img2img_page_request.py delete mode 100644 src/gooey/types/lipsync_page_request.py delete mode 100644 
src/gooey/types/lipsync_tts_page_request.py delete mode 100644 src/gooey/types/object_inpainting_page_request.py delete mode 100644 src/gooey/types/qr_code_generator_page_request.py delete mode 100644 src/gooey/types/related_qn_a_doc_page_request.py create mode 100644 src/gooey/types/related_qn_a_doc_page_request_response_format_type.py delete mode 100644 src/gooey/types/related_qn_a_page_request.py create mode 100644 src/gooey/types/related_qn_a_page_request_response_format_type.py delete mode 100644 src/gooey/types/seo_summary_page_request.py create mode 100644 src/gooey/types/seo_summary_page_request_response_format_type.py delete mode 100644 src/gooey/types/smart_gpt_page_request.py delete mode 100644 src/gooey/types/social_lookup_email_page_request.py create mode 100644 src/gooey/types/social_lookup_email_page_request_response_format_type.py delete mode 100644 src/gooey/types/text2audio_page_request.py delete mode 100644 src/gooey/types/text_to_speech_page_request.py delete mode 100644 src/gooey/types/translation_page_request.py delete mode 100644 src/gooey/types/video_bots_page_request.py delete mode 100644 src/gooey/web_search_gpt3/__init__.py delete mode 100644 src/gooey/web_search_gpt3/client.py diff --git a/README.md b/README.md index e66d2ad..278b73e 100644 --- a/README.md +++ b/README.md @@ -16,14 +16,18 @@ pip install gooeyai Instantiate and use the client with the following: ```python -from gooey import Gooey +from gooey import AnimationPrompt, Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.copilot_integrations.video_bots_stream_create( - integration_id="integration_id", +client.animate( + animation_prompts=[ + AnimationPrompt( + frame="frame", + prompt="prompt", + ) + ], ) ``` @@ -34,17 +38,21 @@ The SDK also exports an `async` client so that you can make non-blocking calls t ```python import asyncio -from gooey import AsyncGooey +from gooey import AnimationPrompt, AsyncGooey client = AsyncGooey( - 
authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) async def main() -> None: - await client.copilot_integrations.video_bots_stream_create( - integration_id="integration_id", + await client.animate( + animation_prompts=[ + AnimationPrompt( + frame="frame", + prompt="prompt", + ) + ], ) @@ -60,7 +68,7 @@ will be thrown. from gooey.core.api_error import ApiError try: - client.copilot_integrations.video_bots_stream_create(...) + client.animate(...) except ApiError as e: print(e.status_code) print(e.body) @@ -83,7 +91,7 @@ A request is deemed retriable when any of the following HTTP status codes is ret Use the `max_retries` request option to configure this behavior. ```python -client.copilot_integrations.video_bots_stream_create(..., { +client.animate(..., { "max_retries": 1 }) ``` @@ -103,7 +111,7 @@ client = Gooey( # Override timeout for a specific method -client.copilot_integrations.video_bots_stream_create(..., { +client.animate(..., { "timeout_in_seconds": 1 }) ``` diff --git a/poetry.lock b/poetry.lock index 464f603..228c9f5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -38,13 +38,13 @@ trio = ["trio (>=0.23)"] [[package]] name = "certifi" -version = "2024.7.4" +version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, - {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, ] [[package]] @@ -106,13 +106,13 @@ trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" -version = "0.27.0" +version = "0.27.2" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, - {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, ] [package.dependencies] @@ -127,16 +127,17 @@ brotli = ["brotli", "brotlicffi"] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" -version = "3.7" +version = "3.8" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, + {file = "idna-3.8-py3-none-any.whl", hash = 
"sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, + {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, ] [[package]] @@ -445,13 +446,13 @@ files = [ [[package]] name = "types-python-dateutil" -version = "2.9.0.20240316" +version = "2.9.0.20240821" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, - {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, + {file = "types-python-dateutil-2.9.0.20240821.tar.gz", hash = "sha256:9649d1dcb6fef1046fb18bebe9ea2aa0028b160918518c34589a46045f6ebd98"}, + {file = "types_python_dateutil-2.9.0.20240821-py3-none-any.whl", hash = "sha256:f5889fcb4e63ed4aaa379b44f93c32593d50b9a94c9a60a0c854d8cc3511cd57"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index ed8f38e..745914e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gooeyai" -version = "0.0.1b1" +version = "0.0.1-beta3" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index c1df924..b24b52e 100644 --- a/reference.md +++ b/reference.md @@ -1,6 +1,5 @@ # Reference -## CopilotIntegrations -
client.copilot_integrations.video_bots_stream_create(...) +
client.animate(...)
@@ -13,14 +12,18 @@
```python -from gooey import Gooey +from gooey import AnimationPrompt, Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.copilot_integrations.video_bots_stream_create( - integration_id="integration_id", +client.animate( + animation_prompts=[ + AnimationPrompt( + frame="frame", + prompt="prompt", + ) + ], ) ``` @@ -37,7 +40,7 @@ client.copilot_integrations.video_bots_stream_create(
-**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab +**animation_prompts:** `typing.Sequence[AnimationPrompt]`
@@ -45,13 +48,7 @@ client.copilot_integrations.video_bots_stream_create(
-**conversation_id:** `typing.Optional[str]` - -The gooey conversation ID. - -If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. - -Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response. +**example_id:** `typing.Optional[str]`
@@ -59,11 +56,7 @@ Note that you may not provide a custom ID here, and must only use the `conversat
-**user_id:** `typing.Optional[str]` - -Your app's custom user ID. - -If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation. +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -71,11 +64,7 @@ If not provided, a random user will be created and a new ID will be returned in
-**user_message_id:** `typing.Optional[str]` - -Your app's custom message ID for the user message. - -If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation. +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -83,7 +72,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user. +**max_frames:** `typing.Optional[int]`
@@ -91,7 +80,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]`
@@ -99,7 +88,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**animation_mode:** `typing.Optional[str]`
@@ -107,7 +96,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**input_prompt:** `typing.Optional[str]` +**zoom:** `typing.Optional[str]`
@@ -115,7 +104,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**input_audio:** `typing.Optional[str]` +**translation_x:** `typing.Optional[str]`
@@ -123,7 +112,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**input_images:** `typing.Optional[typing.Sequence[str]]` +**translation_y:** `typing.Optional[str]`
@@ -131,7 +120,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**input_documents:** `typing.Optional[typing.Sequence[str]]` +**rotation3d_x:** `typing.Optional[str]`
@@ -139,7 +128,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. +**rotation3d_y:** `typing.Optional[str]`
@@ -147,7 +136,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]` +**rotation3d_z:** `typing.Optional[str]`
@@ -155,7 +144,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**bot_script:** `typing.Optional[str]` +**fps:** `typing.Optional[int]`
@@ -163,7 +152,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]` +**seed:** `typing.Optional[int]`
@@ -171,7 +160,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) +**settings:** `typing.Optional[RunSettings]`
@@ -179,31 +168,55 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**avoid_repetition:** `typing.Optional[bool]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**num_outputs:** `typing.Optional[int]` -
+
+
client.qr_code(...)
-**quality:** `typing.Optional[float]` - +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.qr_code( + text_prompt="text_prompt", +) + +``` +
+
+#### ⚙️ Parameters +
-**max_tokens:** `typing.Optional[int]` +
+
+ +**text_prompt:** `str`
@@ -211,7 +224,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**sampling_temperature:** `typing.Optional[float]` +**example_id:** `typing.Optional[str]`
@@ -219,7 +232,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**task_instructions:** `typing.Optional[str]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -227,7 +240,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**query_instructions:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -235,7 +248,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**keyword_instructions:** `typing.Optional[str]` +**qr_code_data:** `typing.Optional[str]`
@@ -243,7 +256,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**documents:** `typing.Optional[typing.Sequence[str]]` +**qr_code_input_image:** `typing.Optional[str]`
@@ -251,7 +264,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**max_references:** `typing.Optional[int]` +**qr_code_vcard:** `typing.Optional[Vcard]`
@@ -259,7 +272,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**max_context_words:** `typing.Optional[int]` +**qr_code_file:** `typing.Optional[str]`
@@ -267,7 +280,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**scroll_jump:** `typing.Optional[int]` +**use_url_shortener:** `typing.Optional[bool]`
@@ -275,7 +288,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]` +**negative_prompt:** `typing.Optional[str]`
@@ -283,12 +296,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - +**image_prompt:** `typing.Optional[str]`
@@ -296,7 +304,9 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]` +**image_prompt_controlnet_models:** `typing.Optional[ + typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] +]`
@@ -304,7 +314,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**use_url_shortener:** `typing.Optional[bool]` +**image_prompt_strength:** `typing.Optional[float]`
@@ -312,7 +322,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. +**image_prompt_scale:** `typing.Optional[float]`
@@ -320,7 +330,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. +**image_prompt_pos_x:** `typing.Optional[float]`
@@ -328,7 +338,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]` +**image_prompt_pos_y:** `typing.Optional[float]`
@@ -336,7 +346,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. +**selected_model:** `typing.Optional[QrCodeGeneratorPageRequestSelectedModel]`
@@ -344,11 +354,9 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**input_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for User Langauge -> LLM Language (English) - +**selected_controlnet_model:** `typing.Optional[ + typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] +]`
@@ -356,11 +364,7 @@ Translation Glossary for User Langauge -> LLM Language (English)
-**output_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for LLM Language (English) -> User Langauge - +**output_width:** `typing.Optional[int]`
@@ -368,7 +372,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]` +**output_height:** `typing.Optional[int]`
@@ -376,7 +380,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). +**guidance_scale:** `typing.Optional[float]`
@@ -384,7 +388,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]` +**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
@@ -392,7 +396,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**uberduck_voice_name:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -400,7 +404,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**uberduck_speaking_rate:** `typing.Optional[float]` +**quality:** `typing.Optional[int]`
@@ -408,7 +412,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**google_voice_name:** `typing.Optional[str]` +**scheduler:** `typing.Optional[QrCodeGeneratorPageRequestScheduler]`
@@ -416,7 +420,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**google_speaking_rate:** `typing.Optional[float]` +**seed:** `typing.Optional[int]`
@@ -424,7 +428,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**google_pitch:** `typing.Optional[float]` +**obj_scale:** `typing.Optional[float]`
@@ -432,7 +436,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**bark_history_prompt:** `typing.Optional[str]` +**obj_pos_x:** `typing.Optional[float]`
@@ -440,7 +444,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead +**obj_pos_y:** `typing.Optional[float]`
@@ -448,7 +452,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_api_key:** `typing.Optional[str]` +**settings:** `typing.Optional[RunSettings]`
@@ -456,23 +460,56 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_voice_id:** `typing.Optional[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+ + +
+
+
+
client.seo_people_also_ask(...)
-**elevenlabs_model:** `typing.Optional[str]` - +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.seo_people_also_ask( + search_query="search_query", + site_filter="site_filter", +) + +``` +
+
+#### ⚙️ Parameters +
-**elevenlabs_stability:** `typing.Optional[float]` +
+
+ +**search_query:** `str`
@@ -480,7 +517,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_similarity_boost:** `typing.Optional[float]` +**site_filter:** `str`
@@ -488,7 +525,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_style:** `typing.Optional[float]` +**example_id:** `typing.Optional[str]`
@@ -496,7 +533,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_speaker_boost:** `typing.Optional[bool]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -504,7 +541,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**azure_voice_name:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -512,7 +549,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]` +**task_instructions:** `typing.Optional[str]`
@@ -520,7 +557,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]` +**query_instructions:** `typing.Optional[str]`
@@ -528,7 +565,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**input_face:** `typing.Optional[str]` +**selected_model:** `typing.Optional[RelatedQnAPageRequestSelectedModel]`
@@ -536,7 +573,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_top:** `typing.Optional[int]` +**max_search_urls:** `typing.Optional[int]`
@@ -544,7 +581,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_bottom:** `typing.Optional[int]` +**max_references:** `typing.Optional[int]`
@@ -552,7 +589,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_left:** `typing.Optional[int]` +**max_context_words:** `typing.Optional[int]`
@@ -560,7 +597,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_right:** `typing.Optional[int]` +**scroll_jump:** `typing.Optional[int]`
@@ -568,7 +605,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` +**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]`
@@ -576,7 +613,12 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**input_text:** `typing.Optional[str]` — Use `input_prompt` instead +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +
@@ -584,56 +626,87 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**avoid_repetition:** `typing.Optional[bool]`
-
-
+
+
+**num_outputs:** `typing.Optional[int]` +
-
-
client.copilot_integrations.video_bots_stream(...)
-#### 🔌 Usage +**quality:** `typing.Optional[float]` + +
+
+**max_tokens:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey +**sampling_temperature:** `typing.Optional[float]` + +
+
-client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.copilot_integrations.video_bots_stream( - request_id="request_id", -) +
+
-``` +**response_format_type:** `typing.Optional[RelatedQnAPageRequestResponseFormatType]` +
+ +
+
+ +**serp_search_location:** `typing.Optional[SerpSearchLocation]` +
-#### ⚙️ Parameters +
+
+ +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead + +
+
+**serp_search_type:** `typing.Optional[SerpSearchType]` + +
+
+
-**request_id:** `str` +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]`
@@ -653,8 +726,7 @@ client.copilot_integrations.video_bots_stream(
-## CopilotForYourEnterprise -
client.copilot_for_your_enterprise.video_bots(...) +
client.seo_content(...)
@@ -670,10 +742,14 @@ client.copilot_integrations.video_bots_stream( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.copilot_for_your_enterprise.video_bots() +client.seo_content( + search_query="search_query", + keywords="keywords", + title="title", + company_url="company_url", +) ```
@@ -689,7 +765,7 @@ client.copilot_for_your_enterprise.video_bots()
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**search_query:** `str`
@@ -697,7 +773,7 @@ client.copilot_for_your_enterprise.video_bots()
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**keywords:** `str`
@@ -705,7 +781,7 @@ client.copilot_for_your_enterprise.video_bots()
-**input_prompt:** `typing.Optional[str]` +**title:** `str`
@@ -713,7 +789,7 @@ client.copilot_for_your_enterprise.video_bots()
-**input_audio:** `typing.Optional[str]` +**company_url:** `str`
@@ -721,7 +797,7 @@ client.copilot_for_your_enterprise.video_bots()
-**input_images:** `typing.Optional[typing.Sequence[str]]` +**example_id:** `typing.Optional[str]`
@@ -729,7 +805,7 @@ client.copilot_for_your_enterprise.video_bots()
-**input_documents:** `typing.Optional[typing.Sequence[str]]` +**task_instructions:** `typing.Optional[str]`
@@ -737,7 +813,7 @@ client.copilot_for_your_enterprise.video_bots()
-**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. +**enable_html:** `typing.Optional[bool]`
@@ -745,7 +821,7 @@ client.copilot_for_your_enterprise.video_bots()
-**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]` +**selected_model:** `typing.Optional[SeoSummaryPageRequestSelectedModel]`
@@ -753,7 +829,7 @@ client.copilot_for_your_enterprise.video_bots()
-**bot_script:** `typing.Optional[str]` +**max_search_urls:** `typing.Optional[int]`
@@ -761,7 +837,7 @@ client.copilot_for_your_enterprise.video_bots()
-**selected_model:** `typing.Optional[VideoBotsPageRequestSelectedModel]` +**enable_crosslinks:** `typing.Optional[bool]`
@@ -769,7 +845,7 @@ client.copilot_for_your_enterprise.video_bots()
-**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) +**seed:** `typing.Optional[int]`
@@ -817,23 +893,7 @@ client.copilot_for_your_enterprise.video_bots()
-**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**query_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**keyword_instructions:** `typing.Optional[str]` +**response_format_type:** `typing.Optional[SeoSummaryPageRequestResponseFormatType]`
@@ -841,7 +901,7 @@ client.copilot_for_your_enterprise.video_bots()
-**documents:** `typing.Optional[typing.Sequence[str]]` +**serp_search_location:** `typing.Optional[SerpSearchLocation]`
@@ -849,7 +909,7 @@ client.copilot_for_your_enterprise.video_bots()
-**max_references:** `typing.Optional[int]` +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
@@ -857,7 +917,7 @@ client.copilot_for_your_enterprise.video_bots()
-**max_context_words:** `typing.Optional[int]` +**serp_search_type:** `typing.Optional[SerpSearchType]`
@@ -865,7 +925,7 @@ client.copilot_for_your_enterprise.video_bots()
-**scroll_jump:** `typing.Optional[int]` +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
@@ -873,7 +933,7 @@ client.copilot_for_your_enterprise.video_bots()
-**embedding_model:** `typing.Optional[VideoBotsPageRequestEmbeddingModel]` +**settings:** `typing.Optional[RunSettings]`
@@ -881,90 +941,56 @@ client.copilot_for_your_enterprise.video_bots()
-**dense_weight:** `typing.Optional[float]` - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**citation_style:** `typing.Optional[VideoBotsPageRequestCitationStyle]` -
-
-
-**use_url_shortener:** `typing.Optional[bool]` -
+
+
client.web_search_llm(...)
-**asr_model:** `typing.Optional[VideoBotsPageRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. - -
-
+#### 🔌 Usage
-**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. - -
-
-
-**translation_model:** `typing.Optional[VideoBotsPageRequestTranslationModel]` - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.web_search_llm( + search_query="search_query", + site_filter="site_filter", +) -**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - +```
- -
-
- -**input_glossary_document:** `typing.Optional[str]` — Translation Glossary for User Langauge -> LLM Language (English) -
-
-
- -**output_glossary_document:** `typing.Optional[str]` — Translation Glossary for LLM Language (English) -> User Langauge - -
-
+#### ⚙️ Parameters
-**lipsync_model:** `typing.Optional[VideoBotsPageRequestLipsyncModel]` - -
-
-
-**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). +**search_query:** `str`
@@ -972,7 +998,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**tts_provider:** `typing.Optional[VideoBotsPageRequestTtsProvider]` +**site_filter:** `str`
@@ -980,7 +1006,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**uberduck_voice_name:** `typing.Optional[str]` +**example_id:** `typing.Optional[str]`
@@ -988,7 +1014,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**uberduck_speaking_rate:** `typing.Optional[float]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -996,7 +1022,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**google_voice_name:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1004,7 +1030,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**google_speaking_rate:** `typing.Optional[float]` +**task_instructions:** `typing.Optional[str]`
@@ -1012,7 +1038,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**google_pitch:** `typing.Optional[float]` +**query_instructions:** `typing.Optional[str]`
@@ -1020,7 +1046,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**bark_history_prompt:** `typing.Optional[str]` +**selected_model:** `typing.Optional[GoogleGptPageRequestSelectedModel]`
@@ -1028,7 +1054,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead +**max_search_urls:** `typing.Optional[int]`
@@ -1036,7 +1062,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_api_key:** `typing.Optional[str]` +**max_references:** `typing.Optional[int]`
@@ -1044,7 +1070,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_voice_id:** `typing.Optional[str]` +**max_context_words:** `typing.Optional[int]`
@@ -1052,7 +1078,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_model:** `typing.Optional[str]` +**scroll_jump:** `typing.Optional[int]`
@@ -1060,7 +1086,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_stability:** `typing.Optional[float]` +**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]`
@@ -1068,15 +1094,12 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
+**dense_weight:** `typing.Optional[float]` -
-
-**elevenlabs_style:** `typing.Optional[float]` +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +
@@ -1084,7 +1107,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_speaker_boost:** `typing.Optional[bool]` +**avoid_repetition:** `typing.Optional[bool]`
@@ -1092,7 +1115,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**azure_voice_name:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -1100,7 +1123,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**openai_voice_name:** `typing.Optional[VideoBotsPageRequestOpenaiVoiceName]` +**quality:** `typing.Optional[float]`
@@ -1108,7 +1131,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**openai_tts_model:** `typing.Optional[VideoBotsPageRequestOpenaiTtsModel]` +**max_tokens:** `typing.Optional[int]`
@@ -1116,7 +1139,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**input_face:** `typing.Optional[str]` +**sampling_temperature:** `typing.Optional[float]`
@@ -1124,7 +1147,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**face_padding_top:** `typing.Optional[int]` +**response_format_type:** `typing.Optional[GoogleGptPageRequestResponseFormatType]`
@@ -1132,7 +1155,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**face_padding_bottom:** `typing.Optional[int]` +**serp_search_location:** `typing.Optional[SerpSearchLocation]`
@@ -1140,7 +1163,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**face_padding_left:** `typing.Optional[int]` +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
@@ -1148,7 +1171,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**face_padding_right:** `typing.Optional[int]` +**serp_search_type:** `typing.Optional[SerpSearchType]`
@@ -1156,7 +1179,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
@@ -1184,7 +1207,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
client.copilot_for_your_enterprise.async_video_bots(...) +
client.personalize_email(...)
@@ -1200,10 +1223,11 @@ Generally speaking, dense embeddings excel at understanding the context of the q from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.copilot_for_your_enterprise.async_video_bots() +client.personalize_email( + email_address="email_address", +) ```
@@ -1219,7 +1243,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**email_address:** `str`
@@ -1227,7 +1251,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**example_id:** `typing.Optional[str]`
@@ -1235,7 +1259,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**input_prompt:** `typing.Optional[str]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1243,7 +1267,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**input_audio:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1251,7 +1275,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**input_images:** `typing.Optional[typing.Sequence[str]]` +**input_prompt:** `typing.Optional[str]`
@@ -1259,7 +1283,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**input_documents:** `typing.Optional[typing.Sequence[str]]` +**selected_model:** `typing.Optional[SocialLookupEmailPageRequestSelectedModel]`
@@ -1267,7 +1291,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. +**avoid_repetition:** `typing.Optional[bool]`
@@ -1275,7 +1299,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]` +**num_outputs:** `typing.Optional[int]`
@@ -1283,7 +1307,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**bot_script:** `typing.Optional[str]` +**quality:** `typing.Optional[float]`
@@ -1291,7 +1315,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**selected_model:** `typing.Optional[VideoBotsPageRequestSelectedModel]` +**max_tokens:** `typing.Optional[int]`
@@ -1299,7 +1323,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) +**sampling_temperature:** `typing.Optional[float]`
@@ -1307,7 +1331,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**avoid_repetition:** `typing.Optional[bool]` +**response_format_type:** `typing.Optional[SocialLookupEmailPageRequestResponseFormatType]`
@@ -1315,7 +1339,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**num_outputs:** `typing.Optional[int]` +**settings:** `typing.Optional[RunSettings]`
@@ -1323,39 +1347,64 @@ client.copilot_for_your_enterprise.async_video_bots()
-**quality:** `typing.Optional[float]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
-
-
-**max_tokens:** `typing.Optional[int]` -
+
+
client.bulk_run(...)
-**sampling_temperature:** `typing.Optional[float]` - -
-
+#### 🔌 Usage
-**task_instructions:** `typing.Optional[str]` - +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.bulk_run( + documents=["documents"], + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, +) + +``` +
+
+#### ⚙️ Parameters +
-**query_instructions:** `typing.Optional[str]` +
+
+ +**documents:** `typing.Sequence[str]` + + +Upload or link to a CSV or google sheet that contains your sample input data. +For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. +Remember to includes header names in your CSV too. +
@@ -1363,7 +1412,12 @@ client.copilot_for_your_enterprise.async_video_bots()
-**keyword_instructions:** `typing.Optional[str]` +**run_urls:** `typing.Sequence[str]` + + +Provide one or more Gooey.AI workflow runs. +You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. +
@@ -1371,7 +1425,11 @@ client.copilot_for_your_enterprise.async_video_bots()
-**documents:** `typing.Optional[typing.Sequence[str]]` +**input_columns:** `typing.Dict[str, str]` + + +For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. +
@@ -1379,7 +1437,11 @@ client.copilot_for_your_enterprise.async_video_bots()
-**max_references:** `typing.Optional[int]` +**output_columns:** `typing.Dict[str, str]` + + +For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. +
@@ -1387,7 +1449,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**max_context_words:** `typing.Optional[int]` +**example_id:** `typing.Optional[str]`
@@ -1395,7 +1457,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**scroll_jump:** `typing.Optional[int]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1403,7 +1465,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**embedding_model:** `typing.Optional[VideoBotsPageRequestEmbeddingModel]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1411,10 +1473,11 @@ client.copilot_for_your_enterprise.async_video_bots()
-**dense_weight:** `typing.Optional[float]` +**eval_urls:** `typing.Optional[typing.Sequence[str]]` -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + +_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. +
@@ -1422,7 +1485,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[VideoBotsPageRequestCitationStyle]` +**settings:** `typing.Optional[RunSettings]`
@@ -1430,55 +1493,55 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**use_url_shortener:** `typing.Optional[bool]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**asr_model:** `typing.Optional[VideoBotsPageRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. -
+
+
client.synthesize_data(...)
-**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. - -
-
+#### 🔌 Usage
-**translation_model:** `typing.Optional[VideoBotsPageRequestTranslationModel]` - -
-
-
-**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.synthesize_data( + documents=["documents"], +) + +```
+ + + +#### ⚙️ Parameters
-**input_glossary_document:** `typing.Optional[str]` — Translation Glossary for User Langauge -> LLM Language (English) - -
-
-
-**output_glossary_document:** `typing.Optional[str]` — Translation Glossary for LLM Language (English) -> User Langauge +**documents:** `typing.Sequence[str]`
@@ -1486,7 +1549,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**lipsync_model:** `typing.Optional[VideoBotsPageRequestLipsyncModel]` +**example_id:** `typing.Optional[str]`
@@ -1494,7 +1557,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1502,7 +1565,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**tts_provider:** `typing.Optional[VideoBotsPageRequestTtsProvider]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1510,7 +1573,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**uberduck_voice_name:** `typing.Optional[str]` +**sheet_url:** `typing.Optional[str]`
@@ -1518,7 +1581,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**uberduck_speaking_rate:** `typing.Optional[float]` +**selected_asr_model:** `typing.Optional[DocExtractPageRequestSelectedAsrModel]`
@@ -1526,7 +1589,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**google_voice_name:** `typing.Optional[str]` +**google_translate_target:** `typing.Optional[str]`
@@ -1534,7 +1597,10 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**google_speaking_rate:** `typing.Optional[float]` +**glossary_document:** `typing.Optional[str]` + +Provide a glossary to customize translation and improve accuracy of domain-specific terms. +If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
@@ -1542,7 +1608,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**google_pitch:** `typing.Optional[float]` +**task_instructions:** `typing.Optional[str]`
@@ -1550,7 +1616,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**bark_history_prompt:** `typing.Optional[str]` +**selected_model:** `typing.Optional[DocExtractPageRequestSelectedModel]`
@@ -1558,7 +1624,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead +**avoid_repetition:** `typing.Optional[bool]`
@@ -1566,7 +1632,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_api_key:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -1574,7 +1640,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_voice_id:** `typing.Optional[str]` +**quality:** `typing.Optional[float]`
@@ -1582,7 +1648,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_model:** `typing.Optional[str]` +**max_tokens:** `typing.Optional[int]`
@@ -1590,7 +1656,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_stability:** `typing.Optional[float]` +**sampling_temperature:** `typing.Optional[float]`
@@ -1598,7 +1664,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_similarity_boost:** `typing.Optional[float]` +**response_format_type:** `typing.Optional[DocExtractPageRequestResponseFormatType]`
@@ -1606,7 +1672,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_style:** `typing.Optional[float]` +**settings:** `typing.Optional[RunSettings]`
@@ -1614,39 +1680,53 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**elevenlabs_speaker_boost:** `typing.Optional[bool]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**azure_voice_name:** `typing.Optional[str]` -
+
+
client.llm(...)
-**openai_voice_name:** `typing.Optional[VideoBotsPageRequestOpenaiVoiceName]` - -
-
+#### 🔌 Usage
-**openai_tts_model:** `typing.Optional[VideoBotsPageRequestOpenaiTtsModel]` - +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.llm() + +``` +
+
+#### ⚙️ Parameters +
-**input_face:** `typing.Optional[str]` +
+
+ +**example_id:** `typing.Optional[str]`
@@ -1654,7 +1734,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**face_padding_top:** `typing.Optional[int]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1662,7 +1742,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**face_padding_bottom:** `typing.Optional[int]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1670,7 +1750,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**face_padding_left:** `typing.Optional[int]` +**input_prompt:** `typing.Optional[str]`
@@ -1678,7 +1758,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**face_padding_right:** `typing.Optional[int]` +**selected_models:** `typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]`
@@ -1686,7 +1766,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` +**avoid_repetition:** `typing.Optional[bool]`
@@ -1694,7 +1774,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**settings:** `typing.Optional[RunSettings]` +**num_outputs:** `typing.Optional[int]`
@@ -1702,56 +1782,39 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**quality:** `typing.Optional[float]`
-
-
+
+
+**max_tokens:** `typing.Optional[int]` +
-
-
client.copilot_for_your_enterprise.status_video_bots(...)
-#### 🔌 Usage - -
-
+**sampling_temperature:** `typing.Optional[float]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.copilot_for_your_enterprise.status_video_bots( - run_id="run_id", -) - -``` -
-
+**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]` +
-#### ⚙️ Parameters - -
-
-
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -1771,8 +1834,7 @@ client.copilot_for_your_enterprise.status_video_bots(
-## AiAnimationGenerator -
client.ai_animation_generator.deforum_sd(...) +
client.rag(...)
@@ -1785,19 +1847,13 @@ client.copilot_for_your_enterprise.status_video_bots(
```python -from gooey import AnimationPrompt, Gooey +from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.ai_animation_generator.deforum_sd( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], +client.rag( + search_query="search_query", ) ``` @@ -1814,7 +1870,7 @@ client.ai_animation_generator.deforum_sd(
-**animation_prompts:** `typing.Sequence[AnimationPrompt]` +**search_query:** `str`
@@ -1822,7 +1878,7 @@ client.ai_animation_generator.deforum_sd(
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**example_id:** `typing.Optional[str]`
@@ -1830,7 +1886,7 @@ client.ai_animation_generator.deforum_sd(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1838,7 +1894,7 @@ client.ai_animation_generator.deforum_sd(
-**max_frames:** `typing.Optional[int]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1846,7 +1902,7 @@ client.ai_animation_generator.deforum_sd(
-**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]` +**keyword_query:** `typing.Optional[DocSearchPageRequestKeywordQuery]`
@@ -1854,7 +1910,7 @@ client.ai_animation_generator.deforum_sd(
-**animation_mode:** `typing.Optional[str]` +**documents:** `typing.Optional[typing.Sequence[str]]`
@@ -1862,7 +1918,7 @@ client.ai_animation_generator.deforum_sd(
-**zoom:** `typing.Optional[str]` +**max_references:** `typing.Optional[int]`
@@ -1870,7 +1926,7 @@ client.ai_animation_generator.deforum_sd(
-**translation_x:** `typing.Optional[str]` +**max_context_words:** `typing.Optional[int]`
@@ -1878,7 +1934,7 @@ client.ai_animation_generator.deforum_sd(
-**translation_y:** `typing.Optional[str]` +**scroll_jump:** `typing.Optional[int]`
@@ -1886,7 +1942,7 @@ client.ai_animation_generator.deforum_sd(
-**rotation3d_x:** `typing.Optional[str]` +**doc_extract_url:** `typing.Optional[str]`
@@ -1894,7 +1950,7 @@ client.ai_animation_generator.deforum_sd(
-**rotation3d_y:** `typing.Optional[str]` +**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]`
@@ -1902,7 +1958,12 @@ client.ai_animation_generator.deforum_sd(
-**rotation3d_z:** `typing.Optional[str]` +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +
@@ -1910,7 +1971,7 @@ client.ai_animation_generator.deforum_sd(
-**fps:** `typing.Optional[int]` +**task_instructions:** `typing.Optional[str]`
@@ -1918,7 +1979,71 @@ client.ai_animation_generator.deforum_sd(
-**seed:** `typing.Optional[int]` +**query_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[DocSearchPageRequestSelectedModel]` + +
+
+ +
+
+ +**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]` + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[DocSearchPageRequestResponseFormatType]`
@@ -1946,7 +2071,7 @@ client.ai_animation_generator.deforum_sd(
-
client.ai_animation_generator.async_deforum_sd(...) +
client.doc_summary(...)
@@ -1959,19 +2084,13 @@ client.ai_animation_generator.deforum_sd(
```python -from gooey import AnimationPrompt, Gooey +from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.ai_animation_generator.async_deforum_sd( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], +client.doc_summary( + documents=["documents"], ) ``` @@ -1988,7 +2107,7 @@ client.ai_animation_generator.async_deforum_sd(
-**animation_prompts:** `typing.Sequence[AnimationPrompt]` +**documents:** `typing.Sequence[str]`
@@ -1996,7 +2115,7 @@ client.ai_animation_generator.async_deforum_sd(
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**example_id:** `typing.Optional[str]`
@@ -2004,7 +2123,7 @@ client.ai_animation_generator.async_deforum_sd(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2012,7 +2131,7 @@ client.ai_animation_generator.async_deforum_sd(
-**max_frames:** `typing.Optional[int]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2020,7 +2139,7 @@ client.ai_animation_generator.async_deforum_sd(
-**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]` +**task_instructions:** `typing.Optional[str]`
@@ -2028,7 +2147,7 @@ client.ai_animation_generator.async_deforum_sd(
-**animation_mode:** `typing.Optional[str]` +**merge_instructions:** `typing.Optional[str]`
@@ -2036,7 +2155,7 @@ client.ai_animation_generator.async_deforum_sd(
-**zoom:** `typing.Optional[str]` +**selected_model:** `typing.Optional[DocSummaryPageRequestSelectedModel]`
@@ -2044,7 +2163,7 @@ client.ai_animation_generator.async_deforum_sd(
-**translation_x:** `typing.Optional[str]` +**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]`
@@ -2052,7 +2171,7 @@ client.ai_animation_generator.async_deforum_sd(
-**translation_y:** `typing.Optional[str]` +**selected_asr_model:** `typing.Optional[DocSummaryPageRequestSelectedAsrModel]`
@@ -2060,7 +2179,7 @@ client.ai_animation_generator.async_deforum_sd(
-**rotation3d_x:** `typing.Optional[str]` +**google_translate_target:** `typing.Optional[str]`
@@ -2068,7 +2187,7 @@ client.ai_animation_generator.async_deforum_sd(
-**rotation3d_y:** `typing.Optional[str]` +**avoid_repetition:** `typing.Optional[bool]`
@@ -2076,7 +2195,7 @@ client.ai_animation_generator.async_deforum_sd(
-**rotation3d_z:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -2084,7 +2203,7 @@ client.ai_animation_generator.async_deforum_sd(
-**fps:** `typing.Optional[int]` +**quality:** `typing.Optional[float]`
@@ -2092,7 +2211,7 @@ client.ai_animation_generator.async_deforum_sd(
-**seed:** `typing.Optional[int]` +**max_tokens:** `typing.Optional[int]`
@@ -2100,7 +2219,7 @@ client.ai_animation_generator.async_deforum_sd(
-**settings:** `typing.Optional[RunSettings]` +**sampling_temperature:** `typing.Optional[float]`
@@ -2108,56 +2227,15 @@ client.ai_animation_generator.async_deforum_sd(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**response_format_type:** `typing.Optional[DocSummaryPageRequestResponseFormatType]`
-
-
- - - - -
- -
client.ai_animation_generator.status_deforum_sd(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_animation_generator.status_deforum_sd( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -2177,8 +2255,7 @@ client.ai_animation_generator.status_deforum_sd(
-## AiArtQrCode -
client.ai_art_qr_code.art_qr_code(...) +
client.lipsync_tts(...)
@@ -2194,10 +2271,9 @@ client.ai_animation_generator.status_deforum_sd( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.ai_art_qr_code.art_qr_code( +client.lipsync_tts( text_prompt="text_prompt", ) @@ -2223,6 +2299,14 @@ client.ai_art_qr_code.art_qr_code(
+**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ **functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2239,7 +2323,7 @@ client.ai_art_qr_code.art_qr_code(
-**qr_code_data:** `typing.Optional[str]` +**tts_provider:** `typing.Optional[LipsyncTtsPageRequestTtsProvider]`
@@ -2247,7 +2331,7 @@ client.ai_art_qr_code.art_qr_code(
-**qr_code_input_image:** `typing.Optional[str]` +**uberduck_voice_name:** `typing.Optional[str]`
@@ -2255,7 +2339,7 @@ client.ai_art_qr_code.art_qr_code(
-**qr_code_vcard:** `typing.Optional[Vcard]` +**uberduck_speaking_rate:** `typing.Optional[float]`
@@ -2263,7 +2347,7 @@ client.ai_art_qr_code.art_qr_code(
-**qr_code_file:** `typing.Optional[str]` +**google_voice_name:** `typing.Optional[str]`
@@ -2271,7 +2355,7 @@ client.ai_art_qr_code.art_qr_code(
-**use_url_shortener:** `typing.Optional[bool]` +**google_speaking_rate:** `typing.Optional[float]`
@@ -2279,7 +2363,7 @@ client.ai_art_qr_code.art_qr_code(
-**negative_prompt:** `typing.Optional[str]` +**google_pitch:** `typing.Optional[float]`
@@ -2287,7 +2371,7 @@ client.ai_art_qr_code.art_qr_code(
-**image_prompt:** `typing.Optional[str]` +**bark_history_prompt:** `typing.Optional[str]`
@@ -2295,9 +2379,7 @@ client.ai_art_qr_code.art_qr_code(
-**image_prompt_controlnet_models:** `typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] -]` +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
@@ -2305,7 +2387,7 @@ client.ai_art_qr_code.art_qr_code(
-**image_prompt_strength:** `typing.Optional[float]` +**elevenlabs_api_key:** `typing.Optional[str]`
@@ -2313,7 +2395,7 @@ client.ai_art_qr_code.art_qr_code(
-**image_prompt_scale:** `typing.Optional[float]` +**elevenlabs_voice_id:** `typing.Optional[str]`
@@ -2321,7 +2403,7 @@ client.ai_art_qr_code.art_qr_code(
-**image_prompt_pos_x:** `typing.Optional[float]` +**elevenlabs_model:** `typing.Optional[str]`
@@ -2329,7 +2411,7 @@ client.ai_art_qr_code.art_qr_code(
-**image_prompt_pos_y:** `typing.Optional[float]` +**elevenlabs_stability:** `typing.Optional[float]`
@@ -2337,7 +2419,7 @@ client.ai_art_qr_code.art_qr_code(
-**selected_model:** `typing.Optional[QrCodeGeneratorPageRequestSelectedModel]` +**elevenlabs_similarity_boost:** `typing.Optional[float]`
@@ -2345,9 +2427,7 @@ client.ai_art_qr_code.art_qr_code(
-**selected_controlnet_model:** `typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] -]` +**elevenlabs_style:** `typing.Optional[float]`
@@ -2355,7 +2435,7 @@ client.ai_art_qr_code.art_qr_code(
-**output_width:** `typing.Optional[int]` +**elevenlabs_speaker_boost:** `typing.Optional[bool]`
@@ -2363,7 +2443,7 @@ client.ai_art_qr_code.art_qr_code(
-**output_height:** `typing.Optional[int]` +**azure_voice_name:** `typing.Optional[str]`
@@ -2371,7 +2451,7 @@ client.ai_art_qr_code.art_qr_code(
-**guidance_scale:** `typing.Optional[float]` +**openai_voice_name:** `typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]`
@@ -2379,7 +2459,7 @@ client.ai_art_qr_code.art_qr_code(
-**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]` +**openai_tts_model:** `typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]`
@@ -2387,7 +2467,7 @@ client.ai_art_qr_code.art_qr_code(
-**num_outputs:** `typing.Optional[int]` +**input_face:** `typing.Optional[str]`
@@ -2395,7 +2475,7 @@ client.ai_art_qr_code.art_qr_code(
-**quality:** `typing.Optional[int]` +**face_padding_top:** `typing.Optional[int]`
@@ -2403,7 +2483,7 @@ client.ai_art_qr_code.art_qr_code(
-**scheduler:** `typing.Optional[QrCodeGeneratorPageRequestScheduler]` +**face_padding_bottom:** `typing.Optional[int]`
@@ -2411,7 +2491,7 @@ client.ai_art_qr_code.art_qr_code(
-**seed:** `typing.Optional[int]` +**face_padding_left:** `typing.Optional[int]`
@@ -2419,7 +2499,7 @@ client.ai_art_qr_code.art_qr_code(
-**obj_scale:** `typing.Optional[float]` +**face_padding_right:** `typing.Optional[int]`
@@ -2427,7 +2507,7 @@ client.ai_art_qr_code.art_qr_code(
-**obj_pos_x:** `typing.Optional[float]` +**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
@@ -2435,7 +2515,7 @@ client.ai_art_qr_code.art_qr_code(
-**obj_pos_y:** `typing.Optional[float]` +**selected_model:** `typing.Optional[LipsyncTtsPageRequestSelectedModel]`
@@ -2463,7 +2543,7 @@ client.ai_art_qr_code.art_qr_code(
-
client.ai_art_qr_code.async_art_qr_code(...) +
client.text_to_speech(...)
@@ -2479,10 +2559,9 @@ client.ai_art_qr_code.art_qr_code( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.ai_art_qr_code.async_art_qr_code( +client.text_to_speech( text_prompt="text_prompt", ) @@ -2508,7 +2587,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**example_id:** `typing.Optional[str]`
@@ -2516,7 +2595,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2524,7 +2603,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**qr_code_data:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2532,7 +2611,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**qr_code_input_image:** `typing.Optional[str]` +**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]`
@@ -2540,7 +2619,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**qr_code_vcard:** `typing.Optional[Vcard]` +**uberduck_voice_name:** `typing.Optional[str]`
@@ -2548,7 +2627,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**qr_code_file:** `typing.Optional[str]` +**uberduck_speaking_rate:** `typing.Optional[float]`
@@ -2556,7 +2635,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**use_url_shortener:** `typing.Optional[bool]` +**google_voice_name:** `typing.Optional[str]`
@@ -2564,7 +2643,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**negative_prompt:** `typing.Optional[str]` +**google_speaking_rate:** `typing.Optional[float]`
@@ -2572,7 +2651,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**image_prompt:** `typing.Optional[str]` +**google_pitch:** `typing.Optional[float]`
@@ -2580,9 +2659,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**image_prompt_controlnet_models:** `typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] -]` +**bark_history_prompt:** `typing.Optional[str]`
@@ -2590,7 +2667,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**image_prompt_strength:** `typing.Optional[float]` +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
@@ -2598,7 +2675,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**image_prompt_scale:** `typing.Optional[float]` +**elevenlabs_api_key:** `typing.Optional[str]`
@@ -2606,7 +2683,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**image_prompt_pos_x:** `typing.Optional[float]` +**elevenlabs_voice_id:** `typing.Optional[str]`
@@ -2614,7 +2691,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**image_prompt_pos_y:** `typing.Optional[float]` +**elevenlabs_model:** `typing.Optional[str]`
@@ -2622,7 +2699,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**selected_model:** `typing.Optional[QrCodeGeneratorPageRequestSelectedModel]` +**elevenlabs_stability:** `typing.Optional[float]`
@@ -2630,9 +2707,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**selected_controlnet_model:** `typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] -]` +**elevenlabs_similarity_boost:** `typing.Optional[float]`
@@ -2640,7 +2715,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**output_width:** `typing.Optional[int]` +**elevenlabs_style:** `typing.Optional[float]`
@@ -2648,7 +2723,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**output_height:** `typing.Optional[int]` +**elevenlabs_speaker_boost:** `typing.Optional[bool]`
@@ -2656,7 +2731,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**guidance_scale:** `typing.Optional[float]` +**azure_voice_name:** `typing.Optional[str]`
@@ -2664,7 +2739,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]` +**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]`
@@ -2672,7 +2747,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**num_outputs:** `typing.Optional[int]` +**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]`
@@ -2680,7 +2755,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**quality:** `typing.Optional[int]` +**settings:** `typing.Optional[RunSettings]`
@@ -2688,23 +2763,55 @@ client.ai_art_qr_code.async_art_qr_code(
-**scheduler:** `typing.Optional[QrCodeGeneratorPageRequestScheduler]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+ + + + +
+
client.speech_recognition(...)
-**seed:** `typing.Optional[int]` - +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.speech_recognition( + documents=["documents"], +) + +``` +
+
+#### ⚙️ Parameters +
-**obj_scale:** `typing.Optional[float]` +
+
+ +**documents:** `typing.Sequence[str]`
@@ -2712,7 +2819,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**obj_pos_x:** `typing.Optional[float]` +**example_id:** `typing.Optional[str]`
@@ -2720,7 +2827,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**obj_pos_y:** `typing.Optional[float]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2728,7 +2835,7 @@ client.ai_art_qr_code.async_art_qr_code(
-**settings:** `typing.Optional[RunSettings]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2736,56 +2843,74 @@ client.ai_art_qr_code.async_art_qr_code(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**selected_model:** `typing.Optional[AsrPageRequestSelectedModel]`
-
-
+
+
+**language:** `typing.Optional[str]` +
-
-
client.ai_art_qr_code.status_art_qr_code(...)
-#### 🔌 Usage +**translation_model:** `typing.Optional[AsrPageRequestTranslationModel]` + +
+
+**output_format:** `typing.Optional[AsrPageRequestOutputFormat]` + +
+
+
-```python -from gooey import Gooey +**google_translate_target:** `typing.Optional[str]` — use `translation_model` & `translation_target` instead. + +
+
-client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_art_qr_code.status_art_qr_code( - run_id="run_id", -) +
+
-``` +**translation_source:** `typing.Optional[str]` +
+ +
+
+ +**translation_target:** `typing.Optional[str]` +
-#### ⚙️ Parameters -
+**glossary_document:** `typing.Optional[str]` + +Provide a glossary to customize translation and improve accuracy of domain-specific terms. +If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -2805,8 +2930,7 @@ client.ai_art_qr_code.status_art_qr_code(
-## GeneratePeopleAlsoAskSeoContent -
client.generate_people_also_ask_seo_content.related_qna_maker(...) +
client.text_to_music(...)
@@ -2822,12 +2946,10 @@ client.ai_art_qr_code.status_art_qr_code( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.generate_people_also_ask_seo_content.related_qna_maker( - search_query="search_query", - site_filter="site_filter", +client.text_to_music( + text_prompt="text_prompt", ) ``` @@ -2844,7 +2966,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**search_query:** `str` +**text_prompt:** `str`
@@ -2852,7 +2974,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**site_filter:** `str` +**example_id:** `typing.Optional[str]`
@@ -2876,7 +2998,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` +**negative_prompt:** `typing.Optional[str]`
@@ -2884,7 +3006,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead +**duration_sec:** `typing.Optional[float]`
@@ -2892,7 +3014,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**serp_search_type:** `typing.Optional[SerpSearchType]` +**num_outputs:** `typing.Optional[int]`
@@ -2900,7 +3022,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +**quality:** `typing.Optional[int]`
@@ -2908,7 +3030,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**task_instructions:** `typing.Optional[str]` +**guidance_scale:** `typing.Optional[float]`
@@ -2916,7 +3038,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**query_instructions:** `typing.Optional[str]` +**seed:** `typing.Optional[int]`
@@ -2924,7 +3046,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**selected_model:** `typing.Optional[RelatedQnAPageRequestSelectedModel]` +**sd2upscaling:** `typing.Optional[bool]`
@@ -2932,7 +3054,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**avoid_repetition:** `typing.Optional[bool]` +**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]`
@@ -2940,7 +3062,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**num_outputs:** `typing.Optional[int]` +**settings:** `typing.Optional[RunSettings]`
@@ -2948,15 +3070,53 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**quality:** `typing.Optional[float]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+ + + + +
+
client.translate(...)
-**max_tokens:** `typing.Optional[int]` +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.translate() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]`
@@ -2964,7 +3124,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**sampling_temperature:** `typing.Optional[float]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2972,7 +3132,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**max_search_urls:** `typing.Optional[int]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2980,7 +3140,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**max_references:** `typing.Optional[int]` +**texts:** `typing.Optional[typing.Sequence[str]]`
@@ -2988,7 +3148,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**max_context_words:** `typing.Optional[int]` +**selected_model:** `typing.Optional[TranslationPageRequestSelectedModel]`
@@ -2996,7 +3156,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**scroll_jump:** `typing.Optional[int]` +**translation_source:** `typing.Optional[str]`
@@ -3004,7 +3164,7 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]` +**translation_target:** `typing.Optional[str]`
@@ -3012,10 +3172,10 @@ client.generate_people_also_ask_seo_content.related_qna_maker(
-**dense_weight:** `typing.Optional[float]` +**glossary_document:** `typing.Optional[str]` -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +Provide a glossary to customize translation and improve accuracy of domain-specific terms. +If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
@@ -3043,7 +3203,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
client.generate_people_also_ask_seo_content.async_related_qna_maker(...) +
client.remix_image(...)
@@ -3059,12 +3219,10 @@ Generally speaking, dense embeddings excel at understanding the context of the q from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.generate_people_also_ask_seo_content.async_related_qna_maker( - search_query="search_query", - site_filter="site_filter", +client.remix_image( + input_image="input_image", ) ``` @@ -3081,7 +3239,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**search_query:** `str` +**input_image:** `str`
@@ -3089,7 +3247,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**site_filter:** `str` +**example_id:** `typing.Optional[str]`
@@ -3113,7 +3271,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` +**text_prompt:** `typing.Optional[str]`
@@ -3121,7 +3279,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead +**selected_model:** `typing.Optional[Img2ImgPageRequestSelectedModel]`
@@ -3129,7 +3287,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**serp_search_type:** `typing.Optional[SerpSearchType]` +**selected_controlnet_model:** `typing.Optional[Img2ImgPageRequestSelectedControlnetModel]`
@@ -3137,7 +3295,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +**negative_prompt:** `typing.Optional[str]`
@@ -3145,7 +3303,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**task_instructions:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -3153,7 +3311,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**query_instructions:** `typing.Optional[str]` +**quality:** `typing.Optional[int]`
@@ -3161,7 +3319,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**selected_model:** `typing.Optional[RelatedQnAPageRequestSelectedModel]` +**output_width:** `typing.Optional[int]`
@@ -3169,7 +3327,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**avoid_repetition:** `typing.Optional[bool]` +**output_height:** `typing.Optional[int]`
@@ -3177,7 +3335,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**num_outputs:** `typing.Optional[int]` +**guidance_scale:** `typing.Optional[float]`
@@ -3185,7 +3343,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**quality:** `typing.Optional[float]` +**prompt_strength:** `typing.Optional[float]`
@@ -3193,7 +3351,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**max_tokens:** `typing.Optional[int]` +**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
@@ -3201,7 +3359,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**sampling_temperature:** `typing.Optional[float]` +**seed:** `typing.Optional[int]`
@@ -3209,7 +3367,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**max_search_urls:** `typing.Optional[int]` +**image_guidance_scale:** `typing.Optional[float]`
@@ -3217,7 +3375,7 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**max_references:** `typing.Optional[int]` +**settings:** `typing.Optional[RunSettings]`
@@ -3225,42 +3383,55 @@ client.generate_people_also_ask_seo_content.async_related_qna_maker(
-**max_context_words:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**scroll_jump:** `typing.Optional[int]` -
+
+
client.text_to_image(...)
-**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]` - -
-
+#### 🔌 Usage
-**dense_weight:** `typing.Optional[float]` +
+
-Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.text_to_image( + text_prompt="text_prompt", +) + +``` +
+
+#### ⚙️ Parameters +
-**settings:** `typing.Optional[RunSettings]` +
+
+ +**text_prompt:** `str`
@@ -3268,56 +3439,143 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**example_id:** `typing.Optional[str]`
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +
+
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
-
-
client.generate_people_also_ask_seo_content.status_related_qna_maker(...)
-#### 🔌 Usage +**negative_prompt:** `typing.Optional[str]` + +
+
+**output_width:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey +**output_height:** `typing.Optional[int]` + +
+
-client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.generate_people_also_ask_seo_content.status_related_qna_maker( - run_id="run_id", -) +
+
-``` +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[int]` +
+ +
+
+ +**dall_e3quality:** `typing.Optional[str]` +
-#### ⚙️ Parameters +
+
+ +**dall_e3style:** `typing.Optional[str]` + +
+
+**guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +
+
+ +
+
+ +**sd2upscaling:** `typing.Optional[bool]` + +
+
+ +
+
+ +**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]` + +
+
+ +
+
+ +**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]` + +
+
+ +
+
+ +**edit_instruction:** `typing.Optional[str]` + +
+
+ +
+
+ +**image_guidance_scale:** `typing.Optional[float]` + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -3337,8 +3595,7 @@ client.generate_people_also_ask_seo_content.status_related_qna_maker(
-## CreateAPerfectSeoOptimizedTitleParagraph -
client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(...) +
client.product_image(...)
@@ -3354,14 +3611,11 @@ client.generate_people_also_ask_seo_content.status_related_qna_maker( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.create_a_perfect_seo_optimized_title_paragraph.seo_summary( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", +client.product_image( + input_image="input_image", + text_prompt="text_prompt", ) ``` @@ -3378,15 +3632,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**search_query:** `str` - -
-
- -
-
- -**keywords:** `str` +**input_image:** `str`
@@ -3394,7 +3640,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**title:** `str` +**text_prompt:** `str`
@@ -3402,7 +3648,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**company_url:** `str` +**example_id:** `typing.Optional[str]`
@@ -3410,7 +3656,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -3418,7 +3664,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3426,7 +3672,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**serp_search_type:** `typing.Optional[SerpSearchType]` +**obj_scale:** `typing.Optional[float]`
@@ -3434,7 +3680,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +**obj_pos_x:** `typing.Optional[float]`
@@ -3442,7 +3688,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**task_instructions:** `typing.Optional[str]` +**obj_pos_y:** `typing.Optional[float]`
@@ -3450,7 +3696,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**enable_html:** `typing.Optional[bool]` +**mask_threshold:** `typing.Optional[float]`
@@ -3458,7 +3704,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**selected_model:** `typing.Optional[SeoSummaryPageRequestSelectedModel]` +**selected_model:** `typing.Optional[ObjectInpaintingPageRequestSelectedModel]`
@@ -3466,7 +3712,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**sampling_temperature:** `typing.Optional[float]` +**negative_prompt:** `typing.Optional[str]`
@@ -3474,7 +3720,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**max_tokens:** `typing.Optional[int]` +**num_outputs:** `typing.Optional[int]`
@@ -3482,7 +3728,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**num_outputs:** `typing.Optional[int]` +**quality:** `typing.Optional[int]`
@@ -3490,7 +3736,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**quality:** `typing.Optional[float]` +**output_width:** `typing.Optional[int]`
@@ -3498,7 +3744,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**avoid_repetition:** `typing.Optional[bool]` +**output_height:** `typing.Optional[int]`
@@ -3506,7 +3752,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**max_search_urls:** `typing.Optional[int]` +**guidance_scale:** `typing.Optional[float]`
@@ -3514,7 +3760,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-**enable_crosslinks:** `typing.Optional[bool]` +**sd2upscaling:** `typing.Optional[bool]`
@@ -3550,7 +3796,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
-
client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(...) +
client.portrait(...)
@@ -3566,14 +3812,11 @@ client.create_a_perfect_seo_optimized_title_paragraph.seo_summary( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", +client.portrait( + input_image="input_image", + text_prompt="tony stark from the iron man", ) ``` @@ -3590,23 +3833,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**search_query:** `str` - -
-
- -
-
- -**keywords:** `str` - -
-
- -
-
- -**title:** `str` +**input_image:** `str`
@@ -3614,7 +3841,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**company_url:** `str` +**text_prompt:** `str`
@@ -3622,7 +3849,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` +**example_id:** `typing.Optional[str]`
@@ -3630,7 +3857,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -3638,7 +3865,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**serp_search_type:** `typing.Optional[SerpSearchType]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3646,7 +3873,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +**face_scale:** `typing.Optional[float]`
@@ -3654,7 +3881,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**task_instructions:** `typing.Optional[str]` +**face_pos_x:** `typing.Optional[float]`
@@ -3662,7 +3889,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**enable_html:** `typing.Optional[bool]` +**face_pos_y:** `typing.Optional[float]`
@@ -3670,7 +3897,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**selected_model:** `typing.Optional[SeoSummaryPageRequestSelectedModel]` +**selected_model:** `typing.Optional[FaceInpaintingPageRequestSelectedModel]`
@@ -3678,7 +3905,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**sampling_temperature:** `typing.Optional[float]` +**negative_prompt:** `typing.Optional[str]`
@@ -3686,7 +3913,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**max_tokens:** `typing.Optional[int]` +**num_outputs:** `typing.Optional[int]`
@@ -3694,7 +3921,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**num_outputs:** `typing.Optional[int]` +**quality:** `typing.Optional[int]`
@@ -3702,7 +3929,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**quality:** `typing.Optional[float]` +**upscale_factor:** `typing.Optional[float]`
@@ -3710,7 +3937,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**avoid_repetition:** `typing.Optional[bool]` +**output_width:** `typing.Optional[int]`
@@ -3718,7 +3945,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**max_search_urls:** `typing.Optional[int]` +**output_height:** `typing.Optional[int]`
@@ -3726,7 +3953,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-**enable_crosslinks:** `typing.Optional[bool]` +**guidance_scale:** `typing.Optional[float]`
@@ -3762,7 +3989,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
-
client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(...) +
client.image_from_email(...)
@@ -3778,11 +4005,11 @@ client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary( - run_id="run_id", +client.image_from_email( + email_address="sean@dara.network", + text_prompt="winter's day in paris", ) ``` @@ -3799,7 +4026,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(
-**run_id:** `str` +**text_prompt:** `str`
@@ -3807,82 +4034,55 @@ client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**example_id:** `typing.Optional[str]`
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +
+
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
-
-## WebSearchGpt3 -
client.web_search_gpt3.google_gpt(...)
-#### 🔌 Usage +**email_address:** `typing.Optional[str]` + +
+
+**twitter_handle:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.web_search_gpt3.google_gpt( - search_query="search_query", - site_filter="site_filter", -) - -``` -
-
- - - -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**site_filter:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
+**face_scale:** `typing.Optional[float]` + +
+
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**face_pos_x:** `typing.Optional[float]`
@@ -3890,7 +4090,7 @@ client.web_search_gpt3.google_gpt(
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` +**face_pos_y:** `typing.Optional[float]`
@@ -3898,7 +4098,7 @@ client.web_search_gpt3.google_gpt(
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead +**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]`
@@ -3906,7 +4106,7 @@ client.web_search_gpt3.google_gpt(
-**serp_search_type:** `typing.Optional[SerpSearchType]` +**negative_prompt:** `typing.Optional[str]`
@@ -3914,7 +4114,7 @@ client.web_search_gpt3.google_gpt(
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +**num_outputs:** `typing.Optional[int]`
@@ -3922,7 +4122,7 @@ client.web_search_gpt3.google_gpt(
-**task_instructions:** `typing.Optional[str]` +**quality:** `typing.Optional[int]`
@@ -3930,7 +4130,7 @@ client.web_search_gpt3.google_gpt(
-**query_instructions:** `typing.Optional[str]` +**upscale_factor:** `typing.Optional[float]`
@@ -3938,7 +4138,7 @@ client.web_search_gpt3.google_gpt(
-**selected_model:** `typing.Optional[GoogleGptPageRequestSelectedModel]` +**output_width:** `typing.Optional[int]`
@@ -3946,7 +4146,7 @@ client.web_search_gpt3.google_gpt(
-**avoid_repetition:** `typing.Optional[bool]` +**output_height:** `typing.Optional[int]`
@@ -3954,7 +4154,7 @@ client.web_search_gpt3.google_gpt(
-**num_outputs:** `typing.Optional[int]` +**guidance_scale:** `typing.Optional[float]`
@@ -3962,7 +4162,7 @@ client.web_search_gpt3.google_gpt(
-**quality:** `typing.Optional[float]` +**should_send_email:** `typing.Optional[bool]`
@@ -3970,7 +4170,7 @@ client.web_search_gpt3.google_gpt(
-**max_tokens:** `typing.Optional[int]` +**email_from:** `typing.Optional[str]`
@@ -3978,7 +4178,7 @@ client.web_search_gpt3.google_gpt(
-**sampling_temperature:** `typing.Optional[float]` +**email_cc:** `typing.Optional[str]`
@@ -3986,7 +4186,7 @@ client.web_search_gpt3.google_gpt(
-**max_search_urls:** `typing.Optional[int]` +**email_bcc:** `typing.Optional[str]`
@@ -3994,7 +4194,7 @@ client.web_search_gpt3.google_gpt(
-**max_references:** `typing.Optional[int]` +**email_subject:** `typing.Optional[str]`
@@ -4002,7 +4202,7 @@ client.web_search_gpt3.google_gpt(
-**max_context_words:** `typing.Optional[int]` +**email_body:** `typing.Optional[str]`
@@ -4010,7 +4210,7 @@ client.web_search_gpt3.google_gpt(
-**scroll_jump:** `typing.Optional[int]` +**email_body_enable_html:** `typing.Optional[bool]`
@@ -4018,7 +4218,7 @@ client.web_search_gpt3.google_gpt(
-**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]` +**fallback_email_body:** `typing.Optional[str]`
@@ -4026,10 +4226,7 @@ client.web_search_gpt3.google_gpt(
-**dense_weight:** `typing.Optional[float]` - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +**seed:** `typing.Optional[int]`
@@ -4057,7 +4254,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
client.web_search_gpt3.async_google_gpt(...) +
client.image_from_web_search(...)
@@ -4073,12 +4270,11 @@ Generally speaking, dense embeddings excel at understanding the context of the q from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.web_search_gpt3.async_google_gpt( +client.image_from_web_search( search_query="search_query", - site_filter="site_filter", + text_prompt="text_prompt", ) ``` @@ -4103,7 +4299,15 @@ client.web_search_gpt3.async_google_gpt(
-**site_filter:** `str` +**text_prompt:** `str` + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]`
@@ -4143,7 +4347,7 @@ client.web_search_gpt3.async_google_gpt(
-**serp_search_type:** `typing.Optional[SerpSearchType]` +**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]`
@@ -4151,7 +4355,7 @@ client.web_search_gpt3.async_google_gpt(
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +**negative_prompt:** `typing.Optional[str]`
@@ -4159,7 +4363,7 @@ client.web_search_gpt3.async_google_gpt(
-**task_instructions:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -4167,7 +4371,7 @@ client.web_search_gpt3.async_google_gpt(
-**query_instructions:** `typing.Optional[str]` +**quality:** `typing.Optional[int]`
@@ -4175,7 +4379,7 @@ client.web_search_gpt3.async_google_gpt(
-**selected_model:** `typing.Optional[GoogleGptPageRequestSelectedModel]` +**guidance_scale:** `typing.Optional[float]`
@@ -4183,7 +4387,7 @@ client.web_search_gpt3.async_google_gpt(
-**avoid_repetition:** `typing.Optional[bool]` +**prompt_strength:** `typing.Optional[float]`
@@ -4191,7 +4395,7 @@ client.web_search_gpt3.async_google_gpt(
-**num_outputs:** `typing.Optional[int]` +**sd2upscaling:** `typing.Optional[bool]`
@@ -4199,7 +4403,7 @@ client.web_search_gpt3.async_google_gpt(
-**quality:** `typing.Optional[float]` +**seed:** `typing.Optional[int]`
@@ -4207,7 +4411,7 @@ client.web_search_gpt3.async_google_gpt(
-**max_tokens:** `typing.Optional[int]` +**image_guidance_scale:** `typing.Optional[float]`
@@ -4215,7 +4419,7 @@ client.web_search_gpt3.async_google_gpt(
-**sampling_temperature:** `typing.Optional[float]` +**settings:** `typing.Optional[RunSettings]`
@@ -4223,23 +4427,55 @@ client.web_search_gpt3.async_google_gpt(
-**max_search_urls:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+ + + + +
+
client.remove_background(...)
-**max_references:** `typing.Optional[int]` - +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.remove_background( + input_image="input_image", +) + +```
+
+
+ +#### ⚙️ Parameters
-**max_context_words:** `typing.Optional[int]` +
+
+ +**input_image:** `str`
@@ -4247,7 +4483,7 @@ client.web_search_gpt3.async_google_gpt(
-**scroll_jump:** `typing.Optional[int]` +**example_id:** `typing.Optional[str]`
@@ -4255,7 +4491,7 @@ client.web_search_gpt3.async_google_gpt(
-**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4263,10 +4499,7 @@ client.web_search_gpt3.async_google_gpt(
-**dense_weight:** `typing.Optional[float]` - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4274,7 +4507,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**settings:** `typing.Optional[RunSettings]` +**selected_model:** `typing.Optional[ImageSegmentationPageRequestSelectedModel]`
@@ -4282,56 +4515,55 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**mask_threshold:** `typing.Optional[float]`
-
-
+
+
+**rect_persepective_transform:** `typing.Optional[bool]` +
-
-
client.web_search_gpt3.status_google_gpt(...)
-#### 🔌 Usage +**reflection_opacity:** `typing.Optional[float]` + +
+
+**obj_scale:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.web_search_gpt3.status_google_gpt( - run_id="run_id", -) - -``` -
-
+**obj_pos_x:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**obj_pos_y:** `typing.Optional[float]` + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -4351,8 +4583,7 @@ client.web_search_gpt3.status_google_gpt(
-## ProfileLookupGpt3ForAiPersonalizedEmails -
client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(...) +
client.upscale(...)
@@ -4368,11 +4599,10 @@ client.web_search_gpt3.status_google_gpt( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email( - email_address="email_address", +client.upscale( + scale=1, ) ``` @@ -4389,23 +4619,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
-**email_address:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**scale:** `int` — The final upsampling scale of the image
@@ -4413,7 +4627,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
-**input_prompt:** `typing.Optional[str]` +**example_id:** `typing.Optional[str]`
@@ -4421,7 +4635,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
-**selected_model:** `typing.Optional[SocialLookupEmailPageRequestSelectedModel]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4429,7 +4643,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
-**num_outputs:** `typing.Optional[int]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4437,7 +4651,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
-**avoid_repetition:** `typing.Optional[bool]` +**input_image:** `typing.Optional[str]` — Input Image
@@ -4445,7 +4659,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
-**quality:** `typing.Optional[float]` +**input_video:** `typing.Optional[str]` — Input Video
@@ -4453,7 +4667,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
-**max_tokens:** `typing.Optional[int]` +**selected_models:** `typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]`
@@ -4461,7 +4675,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
-**sampling_temperature:** `typing.Optional[float]` +**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]`
@@ -4489,7 +4703,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
-
client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(...) +
client.embed(...)
@@ -4505,11 +4719,10 @@ client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email( - email_address="email_address", +client.embed( + texts=["texts"], ) ``` @@ -4526,7 +4739,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
-**email_address:** `str` +**texts:** `typing.Sequence[str]`
@@ -4534,7 +4747,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**example_id:** `typing.Optional[str]`
@@ -4542,7 +4755,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4550,7 +4763,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
-**input_prompt:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4558,7 +4771,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
-**selected_model:** `typing.Optional[SocialLookupEmailPageRequestSelectedModel]` +**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]`
@@ -4566,7 +4779,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
-**num_outputs:** `typing.Optional[int]` +**settings:** `typing.Optional[RunSettings]`
@@ -4574,50 +4787,10 @@ client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
-**avoid_repetition:** `typing.Optional[bool]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
+
+
@@ -4626,7 +4799,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
-
client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(...) +
client.seo_people_also_ask_doc(...)
@@ -4642,11 +4815,10 @@ client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email( - run_id="run_id", +client.seo_people_also_ask_doc( + search_query="search_query", ) ``` @@ -4663,7 +4835,7 @@ client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(
-**run_id:** `str` +**search_query:** `str`
@@ -4671,64 +4843,39 @@ client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**example_id:** `typing.Optional[str]`
-
-
+
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +
-
-## BulkRunner -
client.bulk_runner.post(...)
-#### 🔌 Usage - -
-
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
-```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.bulk_runner.post( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, -) - -``` -
-
+**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]` +
-#### ⚙️ Parameters - -
-
-
-**documents:** `typing.Sequence[str]` - -Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. -Remember to includes header names in your CSV too. +**documents:** `typing.Optional[typing.Sequence[str]]`
@@ -4736,10 +4883,7 @@ Remember to includes header names in your CSV too.
-**run_urls:** `typing.Sequence[str]` - -Provide one or more Gooey.AI workflow runs. -You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. +**max_references:** `typing.Optional[int]`
@@ -4747,7 +4891,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**input_columns:** `typing.Dict[str, str]` — For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. +**max_context_words:** `typing.Optional[int]`
@@ -4755,7 +4899,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**output_columns:** `typing.Dict[str, str]` — For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. +**scroll_jump:** `typing.Optional[int]`
@@ -4763,7 +4907,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**doc_extract_url:** `typing.Optional[str]`
@@ -4771,7 +4915,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]`
@@ -4779,7 +4923,12 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**eval_urls:** `typing.Optional[typing.Sequence[str]]` — _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +
@@ -4787,7 +4936,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**settings:** `typing.Optional[RunSettings]` +**task_instructions:** `typing.Optional[str]`
@@ -4795,63 +4944,55 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**query_instructions:** `typing.Optional[str]`
-
-
+
+
+**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]` +
-
-
client.bulk_runner.async_bulk_runner(...)
-#### 🔌 Usage +**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]` + +
+
+**avoid_repetition:** `typing.Optional[bool]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.bulk_runner.async_bulk_runner( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, -) - -``` -
-
+**num_outputs:** `typing.Optional[int]` + -#### ⚙️ Parameters -
+**quality:** `typing.Optional[float]` + +
+
+
-**documents:** `typing.Sequence[str]` - -Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. -Remember to includes header names in your CSV too. +**max_tokens:** `typing.Optional[int]`
@@ -4859,10 +5000,7 @@ Remember to includes header names in your CSV too.
-**run_urls:** `typing.Sequence[str]` - -Provide one or more Gooey.AI workflow runs. -You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. +**sampling_temperature:** `typing.Optional[float]`
@@ -4870,7 +5008,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**input_columns:** `typing.Dict[str, str]` — For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. +**response_format_type:** `typing.Optional[RelatedQnADocPageRequestResponseFormatType]`
@@ -4878,7 +5016,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**output_columns:** `typing.Dict[str, str]` — For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. +**serp_search_location:** `typing.Optional[SerpSearchLocation]`
@@ -4886,7 +5024,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
@@ -4894,7 +5032,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**serp_search_type:** `typing.Optional[SerpSearchType]`
@@ -4902,7 +5040,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-**eval_urls:** `typing.Optional[typing.Sequence[str]]` — _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
@@ -4930,7 +5068,7 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo
-
client.bulk_runner.status_bulk_runner(...) +
client.health_status_get()
@@ -4946,12 +5084,9 @@ You can add multiple runs from the same recipe (e.g. two versions of your copilo from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.bulk_runner.status_bulk_runner( - run_id="run_id", -) +client.health_status_get() ```
@@ -4967,14 +5102,6 @@ client.bulk_runner.status_bulk_runner(
-**run_id:** `str` - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -4987,8 +5114,8 @@ client.bulk_runner.status_bulk_runner(
-## Evaluator -
client.evaluator.bulk_eval(...) +## CopilotIntegrations +
client.copilot_integrations.video_bots_stream_create(...)
@@ -5004,11 +5131,10 @@ client.bulk_runner.status_bulk_runner( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.evaluator.bulk_eval( - documents=["documents"], +client.copilot_integrations.video_bots_stream_create( + integration_id="integration_id", ) ``` @@ -5025,11 +5151,7 @@ client.evaluator.bulk_eval(
-**documents:** `typing.Sequence[str]` - -Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. -Remember to includes header names in your CSV too. +**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab
@@ -5037,7 +5159,13 @@ Remember to includes header names in your CSV too.
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**conversation_id:** `typing.Optional[str]` + +The gooey conversation ID. + +If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. + +Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response.
@@ -5045,7 +5173,11 @@ Remember to includes header names in your CSV too.
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**user_id:** `typing.Optional[str]` + +Your app's custom user ID. + +If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation.
@@ -5053,7 +5185,11 @@ Remember to includes header names in your CSV too.
-**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]` +**user_message_id:** `typing.Optional[str]` + +Your app's custom message ID for the user message. + +If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation.
@@ -5061,7 +5197,7 @@ Remember to includes header names in your CSV too.
-**avoid_repetition:** `typing.Optional[bool]` +**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user.
@@ -5069,7 +5205,7 @@ Remember to includes header names in your CSV too.
-**num_outputs:** `typing.Optional[int]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -5077,7 +5213,7 @@ Remember to includes header names in your CSV too.
-**quality:** `typing.Optional[float]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -5085,7 +5221,7 @@ Remember to includes header names in your CSV too.
-**max_tokens:** `typing.Optional[int]` +**input_prompt:** `typing.Optional[str]`
@@ -5093,7 +5229,7 @@ Remember to includes header names in your CSV too.
-**sampling_temperature:** `typing.Optional[float]` +**input_audio:** `typing.Optional[str]`
@@ -5101,11 +5237,15 @@ Remember to includes header names in your CSV too.
-**eval_prompts:** `typing.Optional[typing.Sequence[EvalPrompt]]` +**input_images:** `typing.Optional[typing.Sequence[str]]` + +
+
-Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. -_The `columns` dictionary can be used to reference the spreadsheet columns._ +
+
+**input_documents:** `typing.Optional[typing.Sequence[str]]`
@@ -5113,7 +5253,7 @@ _The `columns` dictionary can be used to reference the spreadsheet columns._
-**agg_functions:** `typing.Optional[typing.Sequence[AggFunction]]` — Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). +**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
@@ -5121,7 +5261,7 @@ _The `columns` dictionary can be used to reference the spreadsheet columns._
-**settings:** `typing.Optional[RunSettings]` +**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
@@ -5129,60 +5269,55 @@ _The `columns` dictionary can be used to reference the spreadsheet columns._
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**bot_script:** `typing.Optional[str]`
+ +
+
+ +**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]` +
+
+
+**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) +
-
-
client.evaluator.async_bulk_eval(...)
-#### 🔌 Usage +**task_instructions:** `typing.Optional[str]` + +
+
+**query_instructions:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.evaluator.async_bulk_eval( - documents=["documents"], -) - -``` -
-
+**keyword_instructions:** `typing.Optional[str]` + -#### ⚙️ Parameters -
-
-
- -**documents:** `typing.Sequence[str]` - -Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. -Remember to includes header names in your CSV too. +**documents:** `typing.Optional[typing.Sequence[str]]`
@@ -5190,7 +5325,7 @@ Remember to includes header names in your CSV too.
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**max_references:** `typing.Optional[int]`
@@ -5198,7 +5333,7 @@ Remember to includes header names in your CSV too.
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**max_context_words:** `typing.Optional[int]`
@@ -5206,7 +5341,7 @@ Remember to includes header names in your CSV too.
-**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]` +**scroll_jump:** `typing.Optional[int]`
@@ -5214,7 +5349,7 @@ Remember to includes header names in your CSV too.
-**avoid_repetition:** `typing.Optional[bool]` +**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]`
@@ -5222,15 +5357,12 @@ Remember to includes header names in your CSV too.
-**num_outputs:** `typing.Optional[int]` - -
-
+**dense_weight:** `typing.Optional[float]` -
-
-**quality:** `typing.Optional[float]` +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +
@@ -5238,7 +5370,7 @@ Remember to includes header names in your CSV too.
-**max_tokens:** `typing.Optional[int]` +**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]`
@@ -5246,7 +5378,7 @@ Remember to includes header names in your CSV too.
-**sampling_temperature:** `typing.Optional[float]` +**use_url_shortener:** `typing.Optional[bool]`
@@ -5254,11 +5386,7 @@ Remember to includes header names in your CSV too.
-**eval_prompts:** `typing.Optional[typing.Sequence[EvalPrompt]]` - -Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. -_The `columns` dictionary can be used to reference the spreadsheet columns._ - +**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
@@ -5266,7 +5394,7 @@ _The `columns` dictionary can be used to reference the spreadsheet columns._
-**agg_functions:** `typing.Optional[typing.Sequence[AggFunction]]` — Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). +**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
@@ -5274,7 +5402,7 @@ _The `columns` dictionary can be used to reference the spreadsheet columns._
-**settings:** `typing.Optional[RunSettings]` +**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]`
@@ -5282,56 +5410,19 @@ _The `columns` dictionary can be used to reference the spreadsheet columns._
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
-
-
- - - - -
- -
client.evaluator.status_bulk_eval(...) -
-
- -#### 🔌 Usage - -
-
-```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.evaluator.status_bulk_eval( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
+**input_glossary_document:** `typing.Optional[str]` -
-
-**run_id:** `str` +Translation Glossary for User Language -> LLM Language (English) +
@@ -5339,57 +5430,27 @@ client.evaluator.status_bulk_eval(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
+**output_glossary_document:** `typing.Optional[str]` +Translation Glossary for LLM Language (English) -> User Langauge + +
-
- -## SyntheticDataMakerForVideosPdFs -
client.synthetic_data_maker_for_videos_pd_fs.doc_extract(...) -
-
- -#### 🔌 Usage
-
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.synthetic_data_maker_for_videos_pd_fs.doc_extract( - documents=["documents"], -) - -``` -
-
+**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]` +
-#### ⚙️ Parameters - -
-
-
-**documents:** `typing.Sequence[str]` +**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
@@ -5397,7 +5458,7 @@ client.synthetic_data_maker_for_videos_pd_fs.doc_extract(
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**avoid_repetition:** `typing.Optional[bool]`
@@ -5405,7 +5466,7 @@ client.synthetic_data_maker_for_videos_pd_fs.doc_extract(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**num_outputs:** `typing.Optional[int]`
@@ -5413,7 +5474,7 @@ client.synthetic_data_maker_for_videos_pd_fs.doc_extract(
-**sheet_url:** `typing.Optional[str]` +**quality:** `typing.Optional[float]`
@@ -5421,7 +5482,7 @@ client.synthetic_data_maker_for_videos_pd_fs.doc_extract(
-**selected_asr_model:** `typing.Optional[DocExtractPageRequestSelectedAsrModel]` +**max_tokens:** `typing.Optional[int]`
@@ -5429,7 +5490,7 @@ client.synthetic_data_maker_for_videos_pd_fs.doc_extract(
-**google_translate_target:** `typing.Optional[str]` +**sampling_temperature:** `typing.Optional[float]`
@@ -5437,10 +5498,7 @@ client.synthetic_data_maker_for_videos_pd_fs.doc_extract(
-**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). +**response_format_type:** `typing.Optional[CreateStreamRequestResponseFormatType]`
@@ -5448,7 +5506,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**task_instructions:** `typing.Optional[str]` +**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]`
@@ -5456,7 +5514,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**selected_model:** `typing.Optional[DocExtractPageRequestSelectedModel]` +**uberduck_voice_name:** `typing.Optional[str]`
@@ -5464,7 +5522,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**avoid_repetition:** `typing.Optional[bool]` +**uberduck_speaking_rate:** `typing.Optional[float]`
@@ -5472,7 +5530,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**num_outputs:** `typing.Optional[int]` +**google_voice_name:** `typing.Optional[str]`
@@ -5480,7 +5538,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**quality:** `typing.Optional[float]` +**google_speaking_rate:** `typing.Optional[float]`
@@ -5488,7 +5546,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**max_tokens:** `typing.Optional[int]` +**google_pitch:** `typing.Optional[float]`
@@ -5496,7 +5554,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**sampling_temperature:** `typing.Optional[float]` +**bark_history_prompt:** `typing.Optional[str]`
@@ -5504,7 +5562,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**settings:** `typing.Optional[RunSettings]` +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
@@ -5512,56 +5570,23 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**elevenlabs_api_key:** `typing.Optional[str]`
-
-
- - -
-
-
- -
client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(...) -
-
- -#### 🔌 Usage
-
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract( - documents=["documents"], -) - -``` -
-
+**elevenlabs_voice_id:** `typing.Optional[str]` +
-#### ⚙️ Parameters - -
-
-
-**documents:** `typing.Sequence[str]` +**elevenlabs_model:** `typing.Optional[str]`
@@ -5569,7 +5594,7 @@ client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**elevenlabs_stability:** `typing.Optional[float]`
@@ -5577,7 +5602,7 @@ client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**elevenlabs_similarity_boost:** `typing.Optional[float]`
@@ -5585,7 +5610,7 @@ client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(
-**sheet_url:** `typing.Optional[str]` +**elevenlabs_style:** `typing.Optional[float]`
@@ -5593,7 +5618,7 @@ client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(
-**selected_asr_model:** `typing.Optional[DocExtractPageRequestSelectedAsrModel]` +**elevenlabs_speaker_boost:** `typing.Optional[bool]`
@@ -5601,7 +5626,7 @@ client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(
-**google_translate_target:** `typing.Optional[str]` +**azure_voice_name:** `typing.Optional[str]`
@@ -5609,10 +5634,7 @@ client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(
-**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). +**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]`
@@ -5620,7 +5642,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**task_instructions:** `typing.Optional[str]` +**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]`
@@ -5628,7 +5650,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**selected_model:** `typing.Optional[DocExtractPageRequestSelectedModel]` +**input_face:** `typing.Optional[str]`
@@ -5636,7 +5658,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**avoid_repetition:** `typing.Optional[bool]` +**face_padding_top:** `typing.Optional[int]`
@@ -5644,7 +5666,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**num_outputs:** `typing.Optional[int]` +**face_padding_bottom:** `typing.Optional[int]`
@@ -5652,7 +5674,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**quality:** `typing.Optional[float]` +**face_padding_left:** `typing.Optional[int]`
@@ -5660,7 +5682,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**max_tokens:** `typing.Optional[int]` +**face_padding_right:** `typing.Optional[int]`
@@ -5668,7 +5690,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**sampling_temperature:** `typing.Optional[float]` +**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
@@ -5676,7 +5698,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**settings:** `typing.Optional[RunSettings]` +**input_text:** `typing.Optional[str]` — Use `input_prompt` instead
@@ -5696,7 +5718,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-
client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(...) +
client.copilot_integrations.video_bots_stream(...)
@@ -5712,11 +5734,10 @@ If not specified or invalid, no glossary will be used. Read about the expected f from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract( - run_id="run_id", +client.copilot_integrations.video_bots_stream( + request_id="request_id", ) ``` @@ -5733,7 +5754,7 @@ client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(
-**run_id:** `str` +**request_id:** `str`
@@ -5753,8 +5774,8 @@ client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(
-## LargeLanguageModelsGpt3 -
client.large_language_models_gpt3.compare_llm(...) +## CopilotForYourEnterprise +
client.copilot_for_your_enterprise.async_video_bots(...)
@@ -5770,10 +5791,9 @@ client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.large_language_models_gpt3.compare_llm() +client.copilot_for_your_enterprise.async_video_bots() ```
@@ -5789,7 +5809,7 @@ client.large_language_models_gpt3.compare_llm()
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**example_id:** `typing.Optional[str]`
@@ -5797,7 +5817,7 @@ client.large_language_models_gpt3.compare_llm()
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -5805,7 +5825,7 @@ client.large_language_models_gpt3.compare_llm()
-**input_prompt:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -5813,7 +5833,7 @@ client.large_language_models_gpt3.compare_llm()
-**selected_models:** `typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]` +**input_prompt:** `typing.Optional[str]`
@@ -5821,7 +5841,7 @@ client.large_language_models_gpt3.compare_llm()
-**avoid_repetition:** `typing.Optional[bool]` +**input_audio:** `typing.Optional[str]`
@@ -5829,7 +5849,7 @@ client.large_language_models_gpt3.compare_llm()
-**num_outputs:** `typing.Optional[int]` +**input_images:** `typing.Optional[typing.Sequence[str]]`
@@ -5837,7 +5857,7 @@ client.large_language_models_gpt3.compare_llm()
-**quality:** `typing.Optional[float]` +**input_documents:** `typing.Optional[typing.Sequence[str]]`
@@ -5845,7 +5865,7 @@ client.large_language_models_gpt3.compare_llm()
-**max_tokens:** `typing.Optional[int]` +**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
@@ -5853,7 +5873,7 @@ client.large_language_models_gpt3.compare_llm()
-**sampling_temperature:** `typing.Optional[float]` +**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
@@ -5861,7 +5881,7 @@ client.large_language_models_gpt3.compare_llm()
-**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]` +**bot_script:** `typing.Optional[str]`
@@ -5869,7 +5889,7 @@ client.large_language_models_gpt3.compare_llm()
-**settings:** `typing.Optional[RunSettings]` +**selected_model:** `typing.Optional[VideoBotsPageRequestSelectedModel]`
@@ -5877,54 +5897,55 @@ client.large_language_models_gpt3.compare_llm()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
- -
+
+
+**task_instructions:** `typing.Optional[str]` +
-
-
client.large_language_models_gpt3.async_compare_llm(...)
-#### 🔌 Usage +**query_instructions:** `typing.Optional[str]` + +
+
+**keyword_instructions:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.large_language_models_gpt3.async_compare_llm() - -``` -
-
+**documents:** `typing.Optional[typing.Sequence[str]]` + -#### ⚙️ Parameters -
+**max_references:** `typing.Optional[int]` + +
+
+
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**max_context_words:** `typing.Optional[int]`
@@ -5932,7 +5953,7 @@ client.large_language_models_gpt3.async_compare_llm()
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**scroll_jump:** `typing.Optional[int]`
@@ -5940,7 +5961,7 @@ client.large_language_models_gpt3.async_compare_llm()
-**input_prompt:** `typing.Optional[str]` +**embedding_model:** `typing.Optional[VideoBotsPageRequestEmbeddingModel]`
@@ -5948,7 +5969,12 @@ client.large_language_models_gpt3.async_compare_llm()
-**selected_models:** `typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]` +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +
@@ -5956,7 +5982,7 @@ client.large_language_models_gpt3.async_compare_llm()
-**avoid_repetition:** `typing.Optional[bool]` +**citation_style:** `typing.Optional[VideoBotsPageRequestCitationStyle]`
@@ -5964,7 +5990,7 @@ client.large_language_models_gpt3.async_compare_llm()
-**num_outputs:** `typing.Optional[int]` +**use_url_shortener:** `typing.Optional[bool]`
@@ -5972,7 +5998,7 @@ client.large_language_models_gpt3.async_compare_llm()
-**quality:** `typing.Optional[float]` +**asr_model:** `typing.Optional[VideoBotsPageRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
@@ -5980,7 +6006,7 @@ client.large_language_models_gpt3.async_compare_llm()
-**max_tokens:** `typing.Optional[int]` +**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
@@ -5988,7 +6014,7 @@ client.large_language_models_gpt3.async_compare_llm()
-**sampling_temperature:** `typing.Optional[float]` +**translation_model:** `typing.Optional[VideoBotsPageRequestTranslationModel]`
@@ -5996,7 +6022,7 @@ client.large_language_models_gpt3.async_compare_llm()
-**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]` +**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -6004,7 +6030,11 @@ client.large_language_models_gpt3.async_compare_llm()
-**settings:** `typing.Optional[RunSettings]` +**input_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for User Language -> LLM Language (English) +
@@ -6012,56 +6042,59 @@ client.large_language_models_gpt3.async_compare_llm()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**output_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for LLM Language (English) -> User Language +
- - +
+
+**lipsync_model:** `typing.Optional[VideoBotsPageRequestLipsyncModel]` +
-
-
client.large_language_models_gpt3.status_compare_llm(...)
-#### 🔌 Usage +**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + +
+
+**avoid_repetition:** `typing.Optional[bool]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.large_language_models_gpt3.status_compare_llm( - run_id="run_id", -) - -``` -
-
+**num_outputs:** `typing.Optional[int]` + -#### ⚙️ Parameters -
+**quality:** `typing.Optional[float]` + +
+
+
-**run_id:** `str` +**max_tokens:** `typing.Optional[int]`
@@ -6069,57 +6102,55 @@ client.large_language_models_gpt3.status_compare_llm(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**sampling_temperature:** `typing.Optional[float]`
- - +
+
+**response_format_type:** `typing.Optional[VideoBotsPageRequestResponseFormatType]` +
-
-## SearchYourDocsWithGpt -
client.search_your_docs_with_gpt.doc_search(...)
-#### 🔌 Usage +**tts_provider:** `typing.Optional[VideoBotsPageRequestTtsProvider]` + +
+
+**uberduck_voice_name:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.search_your_docs_with_gpt.doc_search( - search_query="search_query", -) - -``` -
-
+**uberduck_speaking_rate:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**google_voice_name:** `typing.Optional[str]` + +
+
+
-**search_query:** `str` +**google_speaking_rate:** `typing.Optional[float]`
@@ -6127,7 +6158,7 @@ client.search_your_docs_with_gpt.doc_search(
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**google_pitch:** `typing.Optional[float]`
@@ -6135,7 +6166,7 @@ client.search_your_docs_with_gpt.doc_search(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**bark_history_prompt:** `typing.Optional[str]`
@@ -6143,7 +6174,7 @@ client.search_your_docs_with_gpt.doc_search(
-**keyword_query:** `typing.Optional[DocSearchPageRequestKeywordQuery]` +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
@@ -6151,7 +6182,7 @@ client.search_your_docs_with_gpt.doc_search(
-**documents:** `typing.Optional[typing.Sequence[str]]` +**elevenlabs_api_key:** `typing.Optional[str]`
@@ -6159,7 +6190,7 @@ client.search_your_docs_with_gpt.doc_search(
-**max_references:** `typing.Optional[int]` +**elevenlabs_voice_id:** `typing.Optional[str]`
@@ -6167,7 +6198,7 @@ client.search_your_docs_with_gpt.doc_search(
-**max_context_words:** `typing.Optional[int]` +**elevenlabs_model:** `typing.Optional[str]`
@@ -6175,7 +6206,7 @@ client.search_your_docs_with_gpt.doc_search(
-**scroll_jump:** `typing.Optional[int]` +**elevenlabs_stability:** `typing.Optional[float]`
@@ -6183,7 +6214,7 @@ client.search_your_docs_with_gpt.doc_search(
-**doc_extract_url:** `typing.Optional[str]` +**elevenlabs_similarity_boost:** `typing.Optional[float]`
@@ -6191,7 +6222,7 @@ client.search_your_docs_with_gpt.doc_search(
-**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]` +**elevenlabs_style:** `typing.Optional[float]`
@@ -6199,10 +6230,7 @@ client.search_your_docs_with_gpt.doc_search(
-**dense_weight:** `typing.Optional[float]` - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +**elevenlabs_speaker_boost:** `typing.Optional[bool]`
@@ -6210,7 +6238,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**task_instructions:** `typing.Optional[str]` +**azure_voice_name:** `typing.Optional[str]`
@@ -6218,7 +6246,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**query_instructions:** `typing.Optional[str]` +**openai_voice_name:** `typing.Optional[VideoBotsPageRequestOpenaiVoiceName]`
@@ -6226,7 +6254,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**selected_model:** `typing.Optional[DocSearchPageRequestSelectedModel]` +**openai_tts_model:** `typing.Optional[VideoBotsPageRequestOpenaiTtsModel]`
@@ -6234,7 +6262,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**avoid_repetition:** `typing.Optional[bool]` +**input_face:** `typing.Optional[str]`
@@ -6242,7 +6270,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**num_outputs:** `typing.Optional[int]` +**face_padding_top:** `typing.Optional[int]`
@@ -6250,7 +6278,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**quality:** `typing.Optional[float]` +**face_padding_bottom:** `typing.Optional[int]`
@@ -6258,7 +6286,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**max_tokens:** `typing.Optional[int]` +**face_padding_left:** `typing.Optional[int]`
@@ -6266,7 +6294,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**sampling_temperature:** `typing.Optional[float]` +**face_padding_right:** `typing.Optional[int]`
@@ -6274,7 +6302,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]` +**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
@@ -6302,7 +6330,8 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
client.search_your_docs_with_gpt.async_doc_search(...) +## Evaluator +
client.evaluator.async_bulk_eval(...)
@@ -6318,11 +6347,10 @@ Generally speaking, dense embeddings excel at understanding the context of the q from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.search_your_docs_with_gpt.async_doc_search( - search_query="search_query", +client.evaluator.async_bulk_eval( + documents=["documents"], ) ``` @@ -6339,7 +6367,13 @@ client.search_your_docs_with_gpt.async_doc_search(
-**search_query:** `str` +**documents:** `typing.Sequence[str]` + + +Upload or link to a CSV or google sheet that contains your sample input data. +For example, for Copilot, this would be sample questions or for Art QR Code, would be pairs of image descriptions and URLs. +Remember to include header names in your CSV too. +
@@ -6347,7 +6381,7 @@ client.search_your_docs_with_gpt.async_doc_search(
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**example_id:** `typing.Optional[str]`
@@ -6355,7 +6389,7 @@ client.search_your_docs_with_gpt.async_doc_search(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -6363,7 +6397,7 @@ client.search_your_docs_with_gpt.async_doc_search(
-**keyword_query:** `typing.Optional[DocSearchPageRequestKeywordQuery]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -6371,7 +6405,12 @@ client.search_your_docs_with_gpt.async_doc_search(
-**documents:** `typing.Optional[typing.Sequence[str]]` +**eval_prompts:** `typing.Optional[typing.Sequence[EvalPrompt]]` + + +Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. +_The `columns` dictionary can be used to reference the spreadsheet columns._ +
@@ -6379,7 +6418,11 @@ client.search_your_docs_with_gpt.async_doc_search(
-**max_references:** `typing.Optional[int]` +**agg_functions:** `typing.Optional[typing.Sequence[AggFunction]]` + + +Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). +
@@ -6387,7 +6430,7 @@ client.search_your_docs_with_gpt.async_doc_search(
-**max_context_words:** `typing.Optional[int]` +**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]`
@@ -6395,7 +6438,7 @@ client.search_your_docs_with_gpt.async_doc_search(
-**scroll_jump:** `typing.Optional[int]` +**avoid_repetition:** `typing.Optional[bool]`
@@ -6403,7 +6446,7 @@ client.search_your_docs_with_gpt.async_doc_search(
-**doc_extract_url:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -6411,7 +6454,7 @@ client.search_your_docs_with_gpt.async_doc_search(
-**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]` +**quality:** `typing.Optional[float]`
@@ -6419,10 +6462,7 @@ client.search_your_docs_with_gpt.async_doc_search(
-**dense_weight:** `typing.Optional[float]` - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +**max_tokens:** `typing.Optional[int]`
@@ -6430,7 +6470,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**task_instructions:** `typing.Optional[str]` +**sampling_temperature:** `typing.Optional[float]`
@@ -6438,7 +6478,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**query_instructions:** `typing.Optional[str]` +**response_format_type:** `typing.Optional[BulkEvalPageRequestResponseFormatType]`
@@ -6446,7 +6486,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**selected_model:** `typing.Optional[DocSearchPageRequestSelectedModel]` +**settings:** `typing.Optional[RunSettings]`
@@ -6454,133 +6494,20 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**avoid_repetition:** `typing.Optional[bool]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**num_outputs:** `typing.Optional[int]` -
+
-
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
- - - - - - -
- -
client.search_your_docs_with_gpt.status_doc_search(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.search_your_docs_with_gpt.status_doc_search( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## SmartGpt -
client.smart_gpt.post(...) +## SmartGpt +
client.smart_gpt.async_smart_gpt(...)
@@ -6596,10 +6523,9 @@ client.search_your_docs_with_gpt.status_doc_search( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.smart_gpt.post( +client.smart_gpt.async_smart_gpt( input_prompt="input_prompt", ) @@ -6609,7735 +6535,15 @@ client.smart_gpt.post(
-#### ⚙️ Parameters - -
-
- -
-
- -**input_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**cot_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**reflexion_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**dera_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - - - -
- -
client.smart_gpt.async_smart_gpt(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.smart_gpt.async_smart_gpt( - input_prompt="input_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**cot_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**reflexion_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**dera_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.smart_gpt.status_smart_gpt(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.smart_gpt.status_smart_gpt( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## SummarizeYourDocsWithGpt -
client.summarize_your_docs_with_gpt.doc_summary(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.summarize_your_docs_with_gpt.doc_summary( - documents=["documents"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.Sequence[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**merge_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[DocSummaryPageRequestSelectedModel]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]` - -
-
- -
-
- -**selected_asr_model:** `typing.Optional[DocSummaryPageRequestSelectedAsrModel]` - -
-
- -
-
- -**google_translate_target:** `typing.Optional[str]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.summarize_your_docs_with_gpt.async_doc_summary(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.summarize_your_docs_with_gpt.async_doc_summary( - documents=["documents"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.Sequence[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**merge_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[DocSummaryPageRequestSelectedModel]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]` - -
-
- -
-
- -**selected_asr_model:** `typing.Optional[DocSummaryPageRequestSelectedAsrModel]` - -
-
- -
-
- -**google_translate_target:** `typing.Optional[str]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.summarize_your_docs_with_gpt.status_doc_summary(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.summarize_your_docs_with_gpt.status_doc_summary( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## Functions -
client.functions.post(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.functions.post() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**code:** `typing.Optional[str]` — The JS code to be executed. - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used in the code - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.functions.async_functions(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.functions.async_functions() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**code:** `typing.Optional[str]` — The JS code to be executed. - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used in the code - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.functions.status_functions(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.functions.status_functions( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## LipSyncing -
client.lip_syncing.lipsync(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.lip_syncing.lipsync() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_left:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_right:** `typing.Optional[int]` - -
-
- -
-
- -**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**selected_model:** `typing.Optional[LipsyncPageRequestSelectedModel]` - -
-
- -
-
- -**input_audio:** `typing.Optional[str]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.lip_syncing.async_lipsync(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.lip_syncing.async_lipsync() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_left:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_right:** `typing.Optional[int]` - -
-
- -
-
- -**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**selected_model:** `typing.Optional[LipsyncPageRequestSelectedModel]` - -
-
- -
-
- -**input_audio:** `typing.Optional[str]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.lip_syncing.status_lipsync(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.lip_syncing.status_lipsync( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## LipsyncVideoWithAnyText -
client.lipsync_video_with_any_text.lipsync_tts(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.lipsync_video_with_any_text.lipsync_tts( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**tts_provider:** `typing.Optional[LipsyncTtsPageRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]` - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_left:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_right:** `typing.Optional[int]` - -
-
- -
-
- -**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**selected_model:** `typing.Optional[LipsyncTtsPageRequestSelectedModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.lipsync_video_with_any_text.async_lipsync_tts(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.lipsync_video_with_any_text.async_lipsync_tts( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**tts_provider:** `typing.Optional[LipsyncTtsPageRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]` - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_left:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_right:** `typing.Optional[int]` - -
-
- -
-
- -**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**selected_model:** `typing.Optional[LipsyncTtsPageRequestSelectedModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.lipsync_video_with_any_text.status_lipsync_tts(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.lipsync_video_with_any_text.status_lipsync_tts( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## CompareAiVoiceGenerators -
client.compare_ai_voice_generators.text_to_speech(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_voice_generators.text_to_speech( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.compare_ai_voice_generators.async_text_to_speech(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_voice_generators.async_text_to_speech( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.compare_ai_voice_generators.status_text_to_speech(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_voice_generators.status_text_to_speech( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## SpeechRecognitionTranslation -
client.speech_recognition_translation.asr(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.speech_recognition_translation.asr( - documents=["documents"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.Sequence[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[AsrPageRequestSelectedModel]` - -
-
- -
-
- -**language:** `typing.Optional[str]` - -
-
- -
-
- -**translation_model:** `typing.Optional[AsrPageRequestTranslationModel]` - -
-
- -
-
- -**output_format:** `typing.Optional[AsrPageRequestOutputFormat]` - -
-
- -
-
- -**google_translate_target:** `typing.Optional[str]` — use `translation_model` & `translation_target` instead. - -
-
- -
-
- -**translation_source:** `typing.Optional[str]` - -
-
- -
-
- -**translation_target:** `typing.Optional[str]` - -
-
- -
-
- -**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.speech_recognition_translation.async_asr(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.speech_recognition_translation.async_asr( - documents=["documents"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.Sequence[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[AsrPageRequestSelectedModel]` - -
-
- -
-
- -**language:** `typing.Optional[str]` - -
-
- -
-
- -**translation_model:** `typing.Optional[AsrPageRequestTranslationModel]` - -
-
- -
-
- -**output_format:** `typing.Optional[AsrPageRequestOutputFormat]` - -
-
- -
-
- -**google_translate_target:** `typing.Optional[str]` — use `translation_model` & `translation_target` instead. - -
-
- -
-
- -**translation_source:** `typing.Optional[str]` - -
-
- -
-
- -**translation_target:** `typing.Optional[str]` - -
-
- -
-
- -**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.speech_recognition_translation.status_asr(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.speech_recognition_translation.status_asr( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## TextGuidedAudioGenerator -
client.text_guided_audio_generator.text2audio(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.text_guided_audio_generator.text2audio( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**duration_sec:** `typing.Optional[float]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.text_guided_audio_generator.async_text2audio(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.text_guided_audio_generator.async_text2audio( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**duration_sec:** `typing.Optional[float]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.text_guided_audio_generator.status_text2audio(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.text_guided_audio_generator.status_text2audio( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## CompareAiTranslations -
client.compare_ai_translations.translate(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_translations.translate() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**texts:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**selected_model:** `typing.Optional[TranslationPageRequestSelectedModel]` - -
-
- -
-
- -**translation_source:** `typing.Optional[str]` - -
-
- -
-
- -**translation_target:** `typing.Optional[str]` - -
-
- -
-
- -**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.compare_ai_translations.async_translate(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_translations.async_translate() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**texts:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**selected_model:** `typing.Optional[TranslationPageRequestSelectedModel]` - -
-
- -
-
- -**translation_source:** `typing.Optional[str]` - -
-
- -
-
- -**translation_target:** `typing.Optional[str]` - -
-
- -
-
- -**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.compare_ai_translations.status_translate(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_translations.status_translate( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## EditAnImageWithAiPrompt -
client.edit_an_image_with_ai_prompt.img2img(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.edit_an_image_with_ai_prompt.img2img( - input_image="input_image", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**text_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[Img2ImgPageRequestSelectedModel]` - -
-
- -
-
- -**selected_controlnet_model:** `typing.Optional[Img2ImgPageRequestSelectedControlnetModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**prompt_strength:** `typing.Optional[float]` - -
-
- -
-
- -**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.edit_an_image_with_ai_prompt.async_img2img(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.edit_an_image_with_ai_prompt.async_img2img( - input_image="input_image", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**text_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[Img2ImgPageRequestSelectedModel]` - -
-
- -
-
- -**selected_controlnet_model:** `typing.Optional[Img2ImgPageRequestSelectedControlnetModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**prompt_strength:** `typing.Optional[float]` - -
-
- -
-
- -**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.edit_an_image_with_ai_prompt.status_img2img(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.edit_an_image_with_ai_prompt.status_img2img( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## CompareAiImageGenerators -
client.compare_ai_image_generators.compare_text2img(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_image_generators.compare_text2img( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**dall_e3quality:** `typing.Optional[str]` - -
-
- -
-
- -**dall_e3style:** `typing.Optional[str]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]` - -
-
- -
-
- -**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]` - -
-
- -
-
- -**edit_instruction:** `typing.Optional[str]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.compare_ai_image_generators.async_compare_text2img(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_image_generators.async_compare_text2img( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**dall_e3quality:** `typing.Optional[str]` - -
-
- -
-
- -**dall_e3style:** `typing.Optional[str]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]` - -
-
- -
-
- -**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]` - -
-
- -
-
- -**edit_instruction:** `typing.Optional[str]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.compare_ai_image_generators.status_compare_text2img(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_image_generators.status_compare_text2img( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## GenerateProductPhotoBackgrounds -
client.generate_product_photo_backgrounds.object_inpainting(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.generate_product_photo_backgrounds.object_inpainting( - input_image="input_image", - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**obj_scale:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**mask_threshold:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[ObjectInpaintingPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.generate_product_photo_backgrounds.async_object_inpainting(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.generate_product_photo_backgrounds.async_object_inpainting( - input_image="input_image", - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**obj_scale:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**mask_threshold:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[ObjectInpaintingPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.generate_product_photo_backgrounds.status_object_inpainting(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.generate_product_photo_backgrounds.status_object_inpainting( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## AiImageWithAFace -
client.ai_image_with_a_face.face_inpainting(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_image_with_a_face.face_inpainting( - input_image="input_image", - text_prompt="tony stark from the iron man", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**face_scale:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[FaceInpaintingPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**upscale_factor:** `typing.Optional[float]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.ai_image_with_a_face.async_face_inpainting(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_image_with_a_face.async_face_inpainting( - input_image="input_image", - text_prompt="tony stark from the iron man", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**face_scale:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[FaceInpaintingPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**upscale_factor:** `typing.Optional[float]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.ai_image_with_a_face.status_face_inpainting(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_image_with_a_face.status_face_inpainting( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## AiGeneratedPhotoFromEmailProfileLookup -
client.ai_generated_photo_from_email_profile_lookup.email_face_inpainting(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_generated_photo_from_email_profile_lookup.email_face_inpainting( - email_address="sean@dara.network", - text_prompt="winter's day in paris", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**email_address:** `typing.Optional[str]` - -
-
- -
-
- -**twitter_handle:** `typing.Optional[str]` - -
-
- -
-
- -**face_scale:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**upscale_factor:** `typing.Optional[float]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**should_send_email:** `typing.Optional[bool]` - -
-
- -
-
- -**email_from:** `typing.Optional[str]` - -
-
- -
-
- -**email_cc:** `typing.Optional[str]` - -
-
- -
-
- -**email_bcc:** `typing.Optional[str]` - -
-
- -
-
- -**email_subject:** `typing.Optional[str]` - -
-
- -
-
- -**email_body:** `typing.Optional[str]` - -
-
- -
-
- -**email_body_enable_html:** `typing.Optional[bool]` - -
-
- -
-
- -**fallback_email_body:** `typing.Optional[str]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.ai_generated_photo_from_email_profile_lookup.async_email_face_inpainting(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_generated_photo_from_email_profile_lookup.async_email_face_inpainting( - email_address="sean@dara.network", - text_prompt="winter's day in paris", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**email_address:** `typing.Optional[str]` - -
-
- -
-
- -**twitter_handle:** `typing.Optional[str]` - -
-
- -
-
- -**face_scale:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**upscale_factor:** `typing.Optional[float]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**should_send_email:** `typing.Optional[bool]` - -
-
- -
-
- -**email_from:** `typing.Optional[str]` - -
-
- -
-
- -**email_cc:** `typing.Optional[str]` - -
-
- -
-
- -**email_bcc:** `typing.Optional[str]` - -
-
- -
-
- -**email_subject:** `typing.Optional[str]` - -
-
- -
-
- -**email_body:** `typing.Optional[str]` - -
-
- -
-
- -**email_body_enable_html:** `typing.Optional[bool]` - -
-
- -
-
- -**fallback_email_body:** `typing.Optional[str]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## RenderImageSearchResultsWithAi -
client.render_image_search_results_with_ai.google_image_gen(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.render_image_search_results_with_ai.google_image_gen( - search_query="search_query", - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
- -
-
- -**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
- -
-
- -**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**prompt_strength:** `typing.Optional[float]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.render_image_search_results_with_ai.async_google_image_gen(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.render_image_search_results_with_ai.async_google_image_gen( - search_query="search_query", - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
- -
-
- -**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
- -
-
- -**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**prompt_strength:** `typing.Optional[float]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.render_image_search_results_with_ai.status_google_image_gen(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.render_image_search_results_with_ai.status_google_image_gen( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## AiBackgroundChanger -
client.ai_background_changer.image_segmentation(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_background_changer.image_segmentation( - input_image="input_image", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[ImageSegmentationPageRequestSelectedModel]` - -
-
- -
-
- -**mask_threshold:** `typing.Optional[float]` - -
-
- -
-
- -**rect_persepective_transform:** `typing.Optional[bool]` - -
-
- -
-
- -**reflection_opacity:** `typing.Optional[float]` - -
-
- -
-
- -**obj_scale:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.ai_background_changer.async_image_segmentation(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_background_changer.async_image_segmentation( - input_image="input_image", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[ImageSegmentationPageRequestSelectedModel]` - -
-
- -
-
- -**mask_threshold:** `typing.Optional[float]` - -
-
- -
-
- -**rect_persepective_transform:** `typing.Optional[bool]` - -
-
- -
-
- -**reflection_opacity:** `typing.Optional[float]` - -
-
- -
-
- -**obj_scale:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.ai_background_changer.status_image_segmentation(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.ai_background_changer.status_image_segmentation( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## CompareAiImageUpscalers -
client.compare_ai_image_upscalers.compare_ai_upscalers(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_image_upscalers.compare_ai_upscalers( - scale=1, -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**scale:** `int` — The final upsampling scale of the image - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_image:** `typing.Optional[str]` — Input Image - -
-
- -
-
- -**input_video:** `typing.Optional[str]` — Input Video - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]` - -
-
- -
-
- -**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.compare_ai_image_upscalers.async_compare_ai_upscalers(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_image_upscalers.async_compare_ai_upscalers( - scale=1, -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**scale:** `int` — The final upsampling scale of the image - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_image:** `typing.Optional[str]` — Input Image - -
-
- -
-
- -**input_video:** `typing.Optional[str]` — Input Video - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]` - -
-
- -
-
- -**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.compare_ai_image_upscalers.status_compare_ai_upscalers(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.compare_ai_image_upscalers.status_compare_ai_upscalers( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## ChyronPlantBot -
client.chyron_plant_bot.chyron_plant(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.chyron_plant_bot.chyron_plant( - midi_notes="C#1 B6 A2 A1 A3 A2", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**midi_notes:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**midi_notes_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**chyron_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.chyron_plant_bot.async_chyron_plant(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.chyron_plant_bot.async_chyron_plant( - midi_notes="C#1 B6 A2 A1 A3 A2", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**midi_notes:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**midi_notes_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**chyron_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.chyron_plant_bot.status_chyron_plant(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.chyron_plant_bot.status_chyron_plant( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## LetterWriter -
client.letter_writer.letter_writer(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.letter_writer.letter_writer( - action_id="action_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**action_id:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**prompt_header:** `typing.Optional[str]` - -
-
- -
-
- -**example_letters:** `typing.Optional[typing.Sequence[TrainingDataModel]]` - -
-
- -
-
- -**lm_selected_api:** `typing.Optional[str]` - -
-
- -
-
- -**lm_selected_engine:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**lm_sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**api_http_method:** `typing.Optional[str]` - -
-
- -
-
- -**api_url:** `typing.Optional[str]` - -
-
- -
-
- -**api_headers:** `typing.Optional[str]` - -
-
- -
-
- -**api_json_body:** `typing.Optional[str]` - -
-
- -
-
- -**input_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**strip_html2text:** `typing.Optional[bool]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.letter_writer.async_letter_writer(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.letter_writer.async_letter_writer( - action_id="action_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**action_id:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**prompt_header:** `typing.Optional[str]` - -
-
- -
-
- -**example_letters:** `typing.Optional[typing.Sequence[TrainingDataModel]]` - -
-
- -
-
- -**lm_selected_api:** `typing.Optional[str]` - -
-
- -
-
- -**lm_selected_engine:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**lm_sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**api_http_method:** `typing.Optional[str]` - -
-
- -
-
- -**api_url:** `typing.Optional[str]` - -
-
- -
-
- -**api_headers:** `typing.Optional[str]` - -
-
- -
-
- -**api_json_body:** `typing.Optional[str]` - -
-
- -
-
- -**input_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**strip_html2text:** `typing.Optional[bool]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.letter_writer.status_letter_writer(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.letter_writer.status_letter_writer( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## Embeddings -
client.embeddings.post(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.embeddings.post( - texts=["texts"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**texts:** `typing.Sequence[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.embeddings.async_embeddings(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.embeddings.async_embeddings( - texts=["texts"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**texts:** `typing.Sequence[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.embeddings.status_embeddings(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.embeddings.status_embeddings( - run_id="run_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**run_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## PeopleAlsoAskAnswersFromADoc -
client.people_also_ask_answers_from_a_doc.related_qna_maker_doc(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.people_also_ask_answers_from_a_doc.related_qna_maker_doc( - search_query="search_query", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]` - -
-
- -
-
- -**documents:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**max_references:** `typing.Optional[int]` - -
-
+#### ⚙️ Parameters
-**max_context_words:** `typing.Optional[int]` +
+
+ +**input_prompt:** `str`
@@ -14345,7 +6551,7 @@ client.people_also_ask_answers_from_a_doc.related_qna_maker_doc(
-**scroll_jump:** `typing.Optional[int]` +**example_id:** `typing.Optional[str]`
@@ -14353,7 +6559,7 @@ client.people_also_ask_answers_from_a_doc.related_qna_maker_doc(
-**doc_extract_url:** `typing.Optional[str]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -14361,7 +6567,7 @@ client.people_also_ask_answers_from_a_doc.related_qna_maker_doc(
-**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -14369,10 +6575,7 @@ client.people_also_ask_answers_from_a_doc.related_qna_maker_doc(
-**dense_weight:** `typing.Optional[float]` - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +**cot_prompt:** `typing.Optional[str]`
@@ -14380,7 +6583,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**task_instructions:** `typing.Optional[str]` +**reflexion_prompt:** `typing.Optional[str]`
@@ -14388,7 +6591,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**query_instructions:** `typing.Optional[str]` +**dera_prompt:** `typing.Optional[str]`
@@ -14396,7 +6599,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]` +**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]`
@@ -14444,7 +6647,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]` +**response_format_type:** `typing.Optional[SmartGptPageRequestResponseFormatType]`
@@ -14452,7 +6655,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` +**settings:** `typing.Optional[RunSettings]`
@@ -14460,35 +6663,49 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**serp_search_type:** `typing.Optional[SerpSearchType]` -
+
+
client.smart_gpt.post()
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead - -
-
+#### 🔌 Usage
-**settings:** `typing.Optional[RunSettings]` - +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.smart_gpt.post() + +``` +
+
+#### ⚙️ Parameters + +
+
+
@@ -14504,7 +6721,8 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc(...) +## Functions +
client.functions.async_functions(...)
@@ -14520,12 +6738,9 @@ Generally speaking, dense embeddings excel at understanding the context of the q from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc( - search_query="search_query", -) +client.functions.async_functions() ```
@@ -14541,7 +6756,7 @@ client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc(
-**search_query:** `str` +**example_id:** `typing.Optional[str]`
@@ -14549,7 +6764,7 @@ client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc(
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**code:** `typing.Optional[str]` — The JS code to be executed.
@@ -14557,7 +6772,7 @@ client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used in the code
@@ -14565,7 +6780,7 @@ client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc(
-**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]` +**settings:** `typing.Optional[RunSettings]`
@@ -14573,106 +6788,100 @@ client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc(
-**documents:** `typing.Optional[typing.Sequence[str]]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**max_references:** `typing.Optional[int]` -
-
-
-**max_context_words:** `typing.Optional[int]` -
+
+
client.functions.post()
-**scroll_jump:** `typing.Optional[int]` - -
-
+#### 🔌 Usage
-**doc_extract_url:** `typing.Optional[str]` - -
-
-
-**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.functions.post() + +``` +
+
+#### ⚙️ Parameters +
-**dense_weight:** `typing.Optional[float]` +
+
-Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**task_instructions:** `typing.Optional[str]` -
+
+## LipSyncing +
client.lip_syncing.async_lipsync(...)
-**query_instructions:** `typing.Optional[str]` - -
-
+#### 🔌 Usage
-**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]` - -
-
-
-**avoid_repetition:** `typing.Optional[bool]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.lip_syncing.async_lipsync() + +``` +
+
+#### ⚙️ Parameters +
-**num_outputs:** `typing.Optional[int]` - -
-
-
-**quality:** `typing.Optional[float]` +**example_id:** `typing.Optional[str]`
@@ -14680,7 +6889,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**max_tokens:** `typing.Optional[int]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -14688,7 +6897,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**sampling_temperature:** `typing.Optional[float]` +**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -14696,7 +6905,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]` +**input_face:** `typing.Optional[str]`
@@ -14704,7 +6913,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` +**face_padding_top:** `typing.Optional[int]`
@@ -14712,7 +6921,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead +**face_padding_bottom:** `typing.Optional[int]`
@@ -14720,7 +6929,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_type:** `typing.Optional[SerpSearchType]` +**face_padding_left:** `typing.Optional[int]`
@@ -14728,7 +6937,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +**face_padding_right:** `typing.Optional[int]`
@@ -14736,7 +6945,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**settings:** `typing.Optional[RunSettings]` +**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
@@ -14744,56 +6953,23 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**selected_model:** `typing.Optional[LipsyncPageRequestSelectedModel]`
- - - - - - -
- -
client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc(...) -
-
- -#### 🔌 Usage
-
-
- -```python -from gooey import Gooey - -client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", -) -client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc( - run_id="run_id", -) - -``` -
-
+**input_audio:** `typing.Optional[str]` +
-#### ⚙️ Parameters - -
-
-
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -14830,7 +7006,6 @@ client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) client.misc.get_balance() @@ -14877,7 +7052,6 @@ client.misc.get_balance() from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) client.misc.video_bots_broadcast( @@ -14974,7 +7148,55 @@ client.misc.video_bots_broadcast(
-
client.misc.health() +## BulkRunner +
client.bulk_runner.post() +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.bulk_runner.post() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Embeddings +
client.embeddings.post()
@@ -14990,10 +7212,9 @@ client.misc.video_bots_broadcast( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) -client.misc.health() +client.embeddings.post() ```
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py index cf4a056..e9d8b46 100644 --- a/src/gooey/__init__.py +++ b/src/gooey/__init__.py @@ -10,7 +10,6 @@ AsrOutputJson, AsrPageOutput, AsrPageOutputOutputTextItem, - AsrPageRequest, AsrPageRequestOutputFormat, AsrPageRequestSelectedModel, AsrPageRequestTranslationModel, @@ -20,12 +19,9 @@ BalanceResponse, BotBroadcastFilters, BulkEvalPageOutput, - BulkEvalPageRequest, - BulkEvalPageRequestSelectedModel, BulkEvalPageResponse, BulkEvalPageStatusResponse, BulkRunnerPageOutput, - BulkRunnerPageRequest, BulkRunnerPageResponse, BulkRunnerPageStatusResponse, ButtonPressed, @@ -38,19 +34,16 @@ ChyronPlantPageResponse, ChyronPlantPageStatusResponse, CompareLlmPageOutput, - CompareLlmPageRequest, CompareLlmPageRequestResponseFormatType, CompareLlmPageRequestSelectedModelsItem, CompareLlmPageResponse, CompareLlmPageStatusResponse, CompareText2ImgPageOutput, - CompareText2ImgPageRequest, CompareText2ImgPageRequestScheduler, CompareText2ImgPageRequestSelectedModelsItem, CompareText2ImgPageResponse, CompareText2ImgPageStatusResponse, CompareUpscalerPageOutput, - CompareUpscalerPageRequest, CompareUpscalerPageRequestSelectedModelsItem, CompareUpscalerPageResponse, CompareUpscalerPageStatusResponse, @@ -65,43 +58,39 @@ ConversationStart, CreateStreamResponse, DeforumSdPageOutput, - DeforumSdPageRequest, DeforumSdPageRequestSelectedModel, DeforumSdPageResponse, DeforumSdPageStatusResponse, DocExtractPageOutput, - DocExtractPageRequest, + DocExtractPageRequestResponseFormatType, DocExtractPageRequestSelectedAsrModel, DocExtractPageRequestSelectedModel, DocExtractPageResponse, DocExtractPageStatusResponse, DocSearchPageOutput, - DocSearchPageRequest, DocSearchPageRequestCitationStyle, DocSearchPageRequestEmbeddingModel, DocSearchPageRequestKeywordQuery, + DocSearchPageRequestResponseFormatType, DocSearchPageRequestSelectedModel, DocSearchPageResponse, DocSearchPageStatusResponse, DocSummaryPageOutput, - DocSummaryPageRequest, 
+ DocSummaryPageRequestResponseFormatType, DocSummaryPageRequestSelectedAsrModel, DocSummaryPageRequestSelectedModel, DocSummaryPageResponse, DocSummaryPageStatusResponse, EmailFaceInpaintingPageOutput, - EmailFaceInpaintingPageRequest, EmailFaceInpaintingPageRequestSelectedModel, EmailFaceInpaintingPageResponse, EmailFaceInpaintingPageStatusResponse, EmbeddingsPageOutput, - EmbeddingsPageRequest, EmbeddingsPageRequestSelectedModel, EmbeddingsPageResponse, EmbeddingsPageStatusResponse, EvalPrompt, FaceInpaintingPageOutput, - FaceInpaintingPageRequest, FaceInpaintingPageRequestSelectedModel, FaceInpaintingPageResponse, FaceInpaintingPageStatusResponse, @@ -109,32 +98,28 @@ FailedResponseDetail, FinalResponse, FunctionsPageOutput, - FunctionsPageRequest, FunctionsPageResponse, FunctionsPageStatusResponse, GenericErrorResponse, GenericErrorResponseDetail, GoogleGptPageOutput, - GoogleGptPageRequest, GoogleGptPageRequestEmbeddingModel, + GoogleGptPageRequestResponseFormatType, GoogleGptPageRequestSelectedModel, GoogleGptPageResponse, GoogleGptPageStatusResponse, GoogleImageGenPageOutput, - GoogleImageGenPageRequest, GoogleImageGenPageRequestSelectedModel, GoogleImageGenPageResponse, GoogleImageGenPageStatusResponse, HttpValidationError, ImageSegmentationPageOutput, - ImageSegmentationPageRequest, ImageSegmentationPageRequestSelectedModel, ImageSegmentationPageResponse, ImageSegmentationPageStatusResponse, ImageUrl, ImageUrlDetail, Img2ImgPageOutput, - Img2ImgPageRequest, Img2ImgPageRequestSelectedControlnetModel, Img2ImgPageRequestSelectedControlnetModelItem, Img2ImgPageRequestSelectedModel, @@ -145,12 +130,9 @@ LetterWriterPageResponse, LetterWriterPageStatusResponse, LipsyncPageOutput, - LipsyncPageRequest, - LipsyncPageRequestSelectedModel, LipsyncPageResponse, LipsyncPageStatusResponse, LipsyncTtsPageOutput, - LipsyncTtsPageRequest, LipsyncTtsPageRequestOpenaiTtsModel, LipsyncTtsPageRequestOpenaiVoiceName, LipsyncTtsPageRequestSelectedModel, @@ -160,14 +142,12 @@ 
LlmTools, MessagePart, ObjectInpaintingPageOutput, - ObjectInpaintingPageRequest, ObjectInpaintingPageRequestSelectedModel, ObjectInpaintingPageResponse, ObjectInpaintingPageStatusResponse, PromptTreeNode, PromptTreeNodePrompt, QrCodeGeneratorPageOutput, - QrCodeGeneratorPageRequest, QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, QrCodeGeneratorPageRequestScheduler, QrCodeGeneratorPageRequestSelectedControlnetModelItem, @@ -180,16 +160,16 @@ RelatedDocSearchResponse, RelatedGoogleGptResponse, RelatedQnADocPageOutput, - RelatedQnADocPageRequest, RelatedQnADocPageRequestCitationStyle, RelatedQnADocPageRequestEmbeddingModel, RelatedQnADocPageRequestKeywordQuery, + RelatedQnADocPageRequestResponseFormatType, RelatedQnADocPageRequestSelectedModel, RelatedQnADocPageResponse, RelatedQnADocPageStatusResponse, RelatedQnAPageOutput, - RelatedQnAPageRequest, RelatedQnAPageRequestEmbeddingModel, + RelatedQnAPageRequestResponseFormatType, RelatedQnAPageRequestSelectedModel, RelatedQnAPageResponse, RelatedQnAPageStatusResponse, @@ -204,29 +184,25 @@ SadTalkerSettingsPreprocess, SearchReference, SeoSummaryPageOutput, - SeoSummaryPageRequest, + SeoSummaryPageRequestResponseFormatType, SeoSummaryPageRequestSelectedModel, SeoSummaryPageResponse, SeoSummaryPageStatusResponse, SerpSearchLocation, SerpSearchType, SmartGptPageOutput, - SmartGptPageRequest, - SmartGptPageRequestSelectedModel, SmartGptPageResponse, SmartGptPageStatusResponse, SocialLookupEmailPageOutput, - SocialLookupEmailPageRequest, + SocialLookupEmailPageRequestResponseFormatType, SocialLookupEmailPageRequestSelectedModel, SocialLookupEmailPageResponse, SocialLookupEmailPageStatusResponse, StreamError, Text2AudioPageOutput, - Text2AudioPageRequest, Text2AudioPageResponse, Text2AudioPageStatusResponse, TextToSpeechPageOutput, - TextToSpeechPageRequest, TextToSpeechPageRequestOpenaiTtsModel, TextToSpeechPageRequestOpenaiVoiceName, TextToSpeechPageRequestTtsProvider, @@ -234,7 +210,6 @@ 
TextToSpeechPageStatusResponse, TrainingDataModel, TranslationPageOutput, - TranslationPageRequest, TranslationPageRequestSelectedModel, TranslationPageResponse, TranslationPageStatusResponse, @@ -244,58 +219,34 @@ VideoBotsPageOutput, VideoBotsPageOutputFinalKeywordQuery, VideoBotsPageOutputFinalPrompt, - VideoBotsPageRequest, - VideoBotsPageRequestAsrModel, - VideoBotsPageRequestCitationStyle, - VideoBotsPageRequestEmbeddingModel, - VideoBotsPageRequestLipsyncModel, - VideoBotsPageRequestOpenaiTtsModel, - VideoBotsPageRequestOpenaiVoiceName, - VideoBotsPageRequestSelectedModel, - VideoBotsPageRequestTranslationModel, - VideoBotsPageRequestTtsProvider, VideoBotsPageResponse, VideoBotsPageStatusResponse, ) -from .errors import InternalServerError, PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError +from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError from . import ( - ai_animation_generator, - ai_art_qr_code, - ai_background_changer, - ai_generated_photo_from_email_profile_lookup, - ai_image_with_a_face, bulk_runner, - chyron_plant_bot, - compare_ai_image_generators, - compare_ai_image_upscalers, - compare_ai_translations, - compare_ai_voice_generators, copilot_for_your_enterprise, copilot_integrations, - create_a_perfect_seo_optimized_title_paragraph, - edit_an_image_with_ai_prompt, embeddings, evaluator, functions, - generate_people_also_ask_seo_content, - generate_product_photo_backgrounds, - large_language_models_gpt3, - letter_writer, lip_syncing, - lipsync_video_with_any_text, misc, - people_also_ask_answers_from_a_doc, - profile_lookup_gpt3for_ai_personalized_emails, - render_image_search_results_with_ai, - search_your_docs_with_gpt, smart_gpt, - speech_recognition_translation, - summarize_your_docs_with_gpt, - synthetic_data_maker_for_videos_pd_fs, - text_guided_audio_generator, - web_search_gpt3, ) from .client import AsyncGooey, Gooey +from .copilot_for_your_enterprise import ( + 
VideoBotsPageRequestAsrModel, + VideoBotsPageRequestCitationStyle, + VideoBotsPageRequestEmbeddingModel, + VideoBotsPageRequestLipsyncModel, + VideoBotsPageRequestOpenaiTtsModel, + VideoBotsPageRequestOpenaiVoiceName, + VideoBotsPageRequestResponseFormatType, + VideoBotsPageRequestSelectedModel, + VideoBotsPageRequestTranslationModel, + VideoBotsPageRequestTtsProvider, +) from .copilot_integrations import ( CreateStreamRequestAsrModel, CreateStreamRequestCitationStyle, @@ -303,12 +254,16 @@ CreateStreamRequestLipsyncModel, CreateStreamRequestOpenaiTtsModel, CreateStreamRequestOpenaiVoiceName, + CreateStreamRequestResponseFormatType, CreateStreamRequestSelectedModel, CreateStreamRequestTranslationModel, CreateStreamRequestTtsProvider, VideoBotsStreamResponse, ) from .environment import GooeyEnvironment +from .evaluator import BulkEvalPageRequestResponseFormatType, BulkEvalPageRequestSelectedModel +from .lip_syncing import LipsyncPageRequestSelectedModel +from .smart_gpt import SmartGptPageRequestResponseFormatType, SmartGptPageRequestSelectedModel from .version import __version__ __all__ = [ @@ -321,7 +276,6 @@ "AsrOutputJson", "AsrPageOutput", "AsrPageOutputOutputTextItem", - "AsrPageRequest", "AsrPageRequestOutputFormat", "AsrPageRequestSelectedModel", "AsrPageRequestTranslationModel", @@ -332,12 +286,11 @@ "BalanceResponse", "BotBroadcastFilters", "BulkEvalPageOutput", - "BulkEvalPageRequest", + "BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel", "BulkEvalPageResponse", "BulkEvalPageStatusResponse", "BulkRunnerPageOutput", - "BulkRunnerPageRequest", "BulkRunnerPageResponse", "BulkRunnerPageStatusResponse", "ButtonPressed", @@ -350,19 +303,16 @@ "ChyronPlantPageResponse", "ChyronPlantPageStatusResponse", "CompareLlmPageOutput", - "CompareLlmPageRequest", "CompareLlmPageRequestResponseFormatType", "CompareLlmPageRequestSelectedModelsItem", "CompareLlmPageResponse", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", - 
"CompareText2ImgPageRequest", "CompareText2ImgPageRequestScheduler", "CompareText2ImgPageRequestSelectedModelsItem", "CompareText2ImgPageResponse", "CompareText2ImgPageStatusResponse", "CompareUpscalerPageOutput", - "CompareUpscalerPageRequest", "CompareUpscalerPageRequestSelectedModelsItem", "CompareUpscalerPageResponse", "CompareUpscalerPageStatusResponse", @@ -381,48 +331,45 @@ "CreateStreamRequestLipsyncModel", "CreateStreamRequestOpenaiTtsModel", "CreateStreamRequestOpenaiVoiceName", + "CreateStreamRequestResponseFormatType", "CreateStreamRequestSelectedModel", "CreateStreamRequestTranslationModel", "CreateStreamRequestTtsProvider", "CreateStreamResponse", "DeforumSdPageOutput", - "DeforumSdPageRequest", "DeforumSdPageRequestSelectedModel", "DeforumSdPageResponse", "DeforumSdPageStatusResponse", "DocExtractPageOutput", - "DocExtractPageRequest", + "DocExtractPageRequestResponseFormatType", "DocExtractPageRequestSelectedAsrModel", "DocExtractPageRequestSelectedModel", "DocExtractPageResponse", "DocExtractPageStatusResponse", "DocSearchPageOutput", - "DocSearchPageRequest", "DocSearchPageRequestCitationStyle", "DocSearchPageRequestEmbeddingModel", "DocSearchPageRequestKeywordQuery", + "DocSearchPageRequestResponseFormatType", "DocSearchPageRequestSelectedModel", "DocSearchPageResponse", "DocSearchPageStatusResponse", "DocSummaryPageOutput", - "DocSummaryPageRequest", + "DocSummaryPageRequestResponseFormatType", "DocSummaryPageRequestSelectedAsrModel", "DocSummaryPageRequestSelectedModel", "DocSummaryPageResponse", "DocSummaryPageStatusResponse", "EmailFaceInpaintingPageOutput", - "EmailFaceInpaintingPageRequest", "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageResponse", "EmailFaceInpaintingPageStatusResponse", "EmbeddingsPageOutput", - "EmbeddingsPageRequest", "EmbeddingsPageRequestSelectedModel", "EmbeddingsPageResponse", "EmbeddingsPageStatusResponse", "EvalPrompt", "FaceInpaintingPageOutput", - "FaceInpaintingPageRequest", 
"FaceInpaintingPageRequestSelectedModel", "FaceInpaintingPageResponse", "FaceInpaintingPageStatusResponse", @@ -430,7 +377,6 @@ "FailedResponseDetail", "FinalResponse", "FunctionsPageOutput", - "FunctionsPageRequest", "FunctionsPageResponse", "FunctionsPageStatusResponse", "GenericErrorResponse", @@ -438,43 +384,37 @@ "Gooey", "GooeyEnvironment", "GoogleGptPageOutput", - "GoogleGptPageRequest", "GoogleGptPageRequestEmbeddingModel", + "GoogleGptPageRequestResponseFormatType", "GoogleGptPageRequestSelectedModel", "GoogleGptPageResponse", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", - "GoogleImageGenPageRequest", "GoogleImageGenPageRequestSelectedModel", "GoogleImageGenPageResponse", "GoogleImageGenPageStatusResponse", "HttpValidationError", "ImageSegmentationPageOutput", - "ImageSegmentationPageRequest", "ImageSegmentationPageRequestSelectedModel", "ImageSegmentationPageResponse", "ImageSegmentationPageStatusResponse", "ImageUrl", "ImageUrlDetail", "Img2ImgPageOutput", - "Img2ImgPageRequest", "Img2ImgPageRequestSelectedControlnetModel", "Img2ImgPageRequestSelectedControlnetModelItem", "Img2ImgPageRequestSelectedModel", "Img2ImgPageResponse", "Img2ImgPageStatusResponse", - "InternalServerError", "LetterWriterPageOutput", "LetterWriterPageRequest", "LetterWriterPageResponse", "LetterWriterPageStatusResponse", "LipsyncPageOutput", - "LipsyncPageRequest", "LipsyncPageRequestSelectedModel", "LipsyncPageResponse", "LipsyncPageStatusResponse", "LipsyncTtsPageOutput", - "LipsyncTtsPageRequest", "LipsyncTtsPageRequestOpenaiTtsModel", "LipsyncTtsPageRequestOpenaiVoiceName", "LipsyncTtsPageRequestSelectedModel", @@ -484,7 +424,6 @@ "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", - "ObjectInpaintingPageRequest", "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageResponse", "ObjectInpaintingPageStatusResponse", @@ -492,7 +431,6 @@ "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", - "QrCodeGeneratorPageRequest", 
"QrCodeGeneratorPageRequestImagePromptControlnetModelsItem", "QrCodeGeneratorPageRequestScheduler", "QrCodeGeneratorPageRequestSelectedControlnetModelItem", @@ -505,16 +443,16 @@ "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", - "RelatedQnADocPageRequest", "RelatedQnADocPageRequestCitationStyle", "RelatedQnADocPageRequestEmbeddingModel", "RelatedQnADocPageRequestKeywordQuery", + "RelatedQnADocPageRequestResponseFormatType", "RelatedQnADocPageRequestSelectedModel", "RelatedQnADocPageResponse", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", - "RelatedQnAPageRequest", "RelatedQnAPageRequestEmbeddingModel", + "RelatedQnAPageRequestResponseFormatType", "RelatedQnAPageRequestSelectedModel", "RelatedQnAPageResponse", "RelatedQnAPageStatusResponse", @@ -529,29 +467,27 @@ "SadTalkerSettingsPreprocess", "SearchReference", "SeoSummaryPageOutput", - "SeoSummaryPageRequest", + "SeoSummaryPageRequestResponseFormatType", "SeoSummaryPageRequestSelectedModel", "SeoSummaryPageResponse", "SeoSummaryPageStatusResponse", "SerpSearchLocation", "SerpSearchType", "SmartGptPageOutput", - "SmartGptPageRequest", + "SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel", "SmartGptPageResponse", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", - "SocialLookupEmailPageRequest", + "SocialLookupEmailPageRequestResponseFormatType", "SocialLookupEmailPageRequestSelectedModel", "SocialLookupEmailPageResponse", "SocialLookupEmailPageStatusResponse", "StreamError", "Text2AudioPageOutput", - "Text2AudioPageRequest", "Text2AudioPageResponse", "Text2AudioPageStatusResponse", "TextToSpeechPageOutput", - "TextToSpeechPageRequest", "TextToSpeechPageRequestOpenaiTtsModel", "TextToSpeechPageRequestOpenaiVoiceName", "TextToSpeechPageRequestTtsProvider", @@ -560,7 +496,6 @@ "TooManyRequestsError", "TrainingDataModel", "TranslationPageOutput", - "TranslationPageRequest", "TranslationPageRequestSelectedModel", 
"TranslationPageResponse", "TranslationPageStatusResponse", @@ -571,13 +506,13 @@ "VideoBotsPageOutput", "VideoBotsPageOutputFinalKeywordQuery", "VideoBotsPageOutputFinalPrompt", - "VideoBotsPageRequest", "VideoBotsPageRequestAsrModel", "VideoBotsPageRequestCitationStyle", "VideoBotsPageRequestEmbeddingModel", "VideoBotsPageRequestLipsyncModel", "VideoBotsPageRequestOpenaiTtsModel", "VideoBotsPageRequestOpenaiVoiceName", + "VideoBotsPageRequestResponseFormatType", "VideoBotsPageRequestSelectedModel", "VideoBotsPageRequestTranslationModel", "VideoBotsPageRequestTtsProvider", @@ -585,39 +520,13 @@ "VideoBotsPageStatusResponse", "VideoBotsStreamResponse", "__version__", - "ai_animation_generator", - "ai_art_qr_code", - "ai_background_changer", - "ai_generated_photo_from_email_profile_lookup", - "ai_image_with_a_face", "bulk_runner", - "chyron_plant_bot", - "compare_ai_image_generators", - "compare_ai_image_upscalers", - "compare_ai_translations", - "compare_ai_voice_generators", "copilot_for_your_enterprise", "copilot_integrations", - "create_a_perfect_seo_optimized_title_paragraph", - "edit_an_image_with_ai_prompt", "embeddings", "evaluator", "functions", - "generate_people_also_ask_seo_content", - "generate_product_photo_backgrounds", - "large_language_models_gpt3", - "letter_writer", "lip_syncing", - "lipsync_video_with_any_text", "misc", - "people_also_ask_answers_from_a_doc", - "profile_lookup_gpt3for_ai_personalized_emails", - "render_image_search_results_with_ai", - "search_your_docs_with_gpt", "smart_gpt", - "speech_recognition_translation", - "summarize_your_docs_with_gpt", - "synthetic_data_maker_for_videos_pd_fs", - "text_guided_audio_generator", - "web_search_gpt3", ] diff --git a/src/gooey/ai_animation_generator/__init__.py b/src/gooey/ai_animation_generator/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/ai_animation_generator/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API 
Definition. - diff --git a/src/gooey/ai_animation_generator/client.py b/src/gooey/ai_animation_generator/client.py deleted file mode 100644 index 4ea5282..0000000 --- a/src/gooey/ai_animation_generator/client.py +++ /dev/null @@ -1,658 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.animation_prompt import AnimationPrompt -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel -from ..types.deforum_sd_page_response import DeforumSdPageResponse -from ..types.deforum_sd_page_status_response import DeforumSdPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class AiAnimationGeneratorClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def deforum_sd( - self, - *, - animation_prompts: typing.Sequence[AnimationPrompt], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, - animation_mode: typing.Optional[str] = OMIT, - zoom: typing.Optional[str] = OMIT, - translation_x: typing.Optional[str] = OMIT, - translation_y: typing.Optional[str] = OMIT, - rotation3d_x: typing.Optional[str] = OMIT, - rotation3d_y: typing.Optional[str] = OMIT, - rotation3d_z: typing.Optional[str] = OMIT, - fps: typing.Optional[int] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> DeforumSdPageResponse: - """ - Parameters - ---------- - animation_prompts : typing.Sequence[AnimationPrompt] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - max_frames : typing.Optional[int] - - selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] - - animation_mode : typing.Optional[str] - - zoom : typing.Optional[str] - - translation_x : typing.Optional[str] - - translation_y : typing.Optional[str] - - rotation3d_x : typing.Optional[str] - - rotation3d_y : typing.Optional[str] - - rotation3d_z : typing.Optional[str] - - fps : typing.Optional[int] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DeforumSdPageResponse - Successful Response - - Examples - -------- - from gooey import AnimationPrompt, Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_animation_generator.deforum_sd( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/DeforumSD/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, - "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DeforumSdPageResponse, parse_obj_as(type_=DeforumSdPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - 
raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_deforum_sd( - self, - *, - animation_prompts: typing.Sequence[AnimationPrompt], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, - animation_mode: typing.Optional[str] = OMIT, - zoom: typing.Optional[str] = OMIT, - translation_x: typing.Optional[str] = OMIT, - translation_y: typing.Optional[str] = OMIT, - rotation3d_x: typing.Optional[str] = OMIT, - rotation3d_y: typing.Optional[str] = OMIT, - rotation3d_z: typing.Optional[str] = OMIT, - fps: typing.Optional[int] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - animation_prompts : typing.Sequence[AnimationPrompt] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - max_frames : typing.Optional[int] - - selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] - - animation_mode : typing.Optional[str] - - zoom : typing.Optional[str] - - translation_x : typing.Optional[str] - - translation_y : typing.Optional[str] - - rotation3d_x : typing.Optional[str] - - rotation3d_y : typing.Optional[str] - - rotation3d_z : typing.Optional[str] - - fps : typing.Optional[int] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import AnimationPrompt, Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_animation_generator.async_deforum_sd( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, - "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_deforum_sd( - self, *, run_id: str, request_options: 
typing.Optional[RequestOptions] = None - ) -> DeforumSdPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DeforumSdPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_animation_generator.status_deforum_sd( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/DeforumSD/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DeforumSdPageStatusResponse, parse_obj_as(type_=DeforumSdPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncAiAnimationGeneratorClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def deforum_sd( - self, - *, - animation_prompts: typing.Sequence[AnimationPrompt], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - max_frames: 
typing.Optional[int] = OMIT, - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, - animation_mode: typing.Optional[str] = OMIT, - zoom: typing.Optional[str] = OMIT, - translation_x: typing.Optional[str] = OMIT, - translation_y: typing.Optional[str] = OMIT, - rotation3d_x: typing.Optional[str] = OMIT, - rotation3d_y: typing.Optional[str] = OMIT, - rotation3d_z: typing.Optional[str] = OMIT, - fps: typing.Optional[int] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> DeforumSdPageResponse: - """ - Parameters - ---------- - animation_prompts : typing.Sequence[AnimationPrompt] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - max_frames : typing.Optional[int] - - selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] - - animation_mode : typing.Optional[str] - - zoom : typing.Optional[str] - - translation_x : typing.Optional[str] - - translation_y : typing.Optional[str] - - rotation3d_x : typing.Optional[str] - - rotation3d_y : typing.Optional[str] - - rotation3d_z : typing.Optional[str] - - fps : typing.Optional[int] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DeforumSdPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AnimationPrompt, AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_animation_generator.deforum_sd( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/DeforumSD/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, - "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DeforumSdPageResponse, parse_obj_as(type_=DeforumSdPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - 
except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_deforum_sd( - self, - *, - animation_prompts: typing.Sequence[AnimationPrompt], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, - animation_mode: typing.Optional[str] = OMIT, - zoom: typing.Optional[str] = OMIT, - translation_x: typing.Optional[str] = OMIT, - translation_y: typing.Optional[str] = OMIT, - rotation3d_x: typing.Optional[str] = OMIT, - rotation3d_y: typing.Optional[str] = OMIT, - rotation3d_z: typing.Optional[str] = OMIT, - fps: typing.Optional[int] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - animation_prompts : typing.Sequence[AnimationPrompt] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - max_frames : typing.Optional[int] - - selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] - - animation_mode : typing.Optional[str] - - zoom : typing.Optional[str] - - translation_x : typing.Optional[str] - - translation_y : typing.Optional[str] - - rotation3d_x : typing.Optional[str] - - rotation3d_y : typing.Optional[str] - - rotation3d_z : typing.Optional[str] - - fps : typing.Optional[int] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AnimationPrompt, AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_animation_generator.async_deforum_sd( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, - "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) 
- - async def status_deforum_sd( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DeforumSdPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DeforumSdPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_animation_generator.status_deforum_sd( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/DeforumSD/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DeforumSdPageStatusResponse, parse_obj_as(type_=DeforumSdPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/ai_art_qr_code/__init__.py b/src/gooey/ai_art_qr_code/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/ai_art_qr_code/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file 
was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/ai_art_qr_code/client.py b/src/gooey/ai_art_qr_code/client.py deleted file mode 100644 index 52dfbba..0000000 --- a/src/gooey/ai_art_qr_code/client.py +++ /dev/null @@ -1,885 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.qr_code_generator_page_request_image_prompt_controlnet_models_item import ( - QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, -) -from ..types.qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler -from ..types.qr_code_generator_page_request_selected_controlnet_model_item import ( - QrCodeGeneratorPageRequestSelectedControlnetModelItem, -) -from ..types.qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel -from ..types.qr_code_generator_page_response import QrCodeGeneratorPageResponse -from ..types.qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from ..types.vcard import Vcard - -# this is used as the default value for 
optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class AiArtQrCodeClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def art_qr_code( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - qr_code_data: typing.Optional[str] = OMIT, - qr_code_input_image: typing.Optional[str] = OMIT, - qr_code_vcard: typing.Optional[Vcard] = OMIT, - qr_code_file: typing.Optional[str] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - image_prompt: typing.Optional[str] = OMIT, - image_prompt_controlnet_models: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] - ] = OMIT, - image_prompt_strength: typing.Optional[float] = OMIT, - image_prompt_scale: typing.Optional[float] = OMIT, - image_prompt_pos_x: typing.Optional[float] = OMIT, - image_prompt_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] - ] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT, - seed: typing.Optional[int] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> QrCodeGeneratorPageResponse: - """ - Parameters - 
---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - qr_code_data : typing.Optional[str] - - qr_code_input_image : typing.Optional[str] - - qr_code_vcard : typing.Optional[Vcard] - - qr_code_file : typing.Optional[str] - - use_url_shortener : typing.Optional[bool] - - negative_prompt : typing.Optional[str] - - image_prompt : typing.Optional[str] - - image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]] - - image_prompt_strength : typing.Optional[float] - - image_prompt_scale : typing.Optional[float] - - image_prompt_pos_x : typing.Optional[float] - - image_prompt_pos_y : typing.Optional[float] - - selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler] - - seed : typing.Optional[int] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - QrCodeGeneratorPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_art_qr_code.art_qr_code( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/art-qr-code/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_input_image": qr_code_input_image, - "qr_code_vcard": qr_code_vcard, - "qr_code_file": qr_code_file, - "use_url_shortener": use_url_shortener, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": image_prompt_pos_y, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "num_outputs": num_outputs, - "quality": quality, - "scheduler": scheduler, - "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(QrCodeGeneratorPageResponse, parse_obj_as(type_=QrCodeGeneratorPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_art_qr_code( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - qr_code_data: typing.Optional[str] = OMIT, - qr_code_input_image: typing.Optional[str] = OMIT, - qr_code_vcard: typing.Optional[Vcard] = OMIT, - qr_code_file: typing.Optional[str] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - image_prompt: typing.Optional[str] = OMIT, - image_prompt_controlnet_models: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] - ] = OMIT, - image_prompt_strength: typing.Optional[float] = OMIT, - image_prompt_scale: typing.Optional[float] = OMIT, - image_prompt_pos_x: typing.Optional[float] = OMIT, - image_prompt_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] - ] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - 
quality: typing.Optional[int] = OMIT, - scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT, - seed: typing.Optional[int] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - qr_code_data : typing.Optional[str] - - qr_code_input_image : typing.Optional[str] - - qr_code_vcard : typing.Optional[Vcard] - - qr_code_file : typing.Optional[str] - - use_url_shortener : typing.Optional[bool] - - negative_prompt : typing.Optional[str] - - image_prompt : typing.Optional[str] - - image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]] - - image_prompt_strength : typing.Optional[float] - - image_prompt_scale : typing.Optional[float] - - image_prompt_pos_x : typing.Optional[float] - - image_prompt_pos_y : typing.Optional[float] - - selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler] - - seed : typing.Optional[int] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - 
settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_art_qr_code.async_art_qr_code( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_input_image": qr_code_input_image, - "qr_code_vcard": qr_code_vcard, - "qr_code_file": qr_code_file, - "use_url_shortener": use_url_shortener, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": image_prompt_pos_y, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "num_outputs": num_outputs, - "quality": quality, - "scheduler": scheduler, - "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if 
_response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_art_qr_code( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> QrCodeGeneratorPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - QrCodeGeneratorPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_art_qr_code.status_art_qr_code( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/art-qr-code/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(QrCodeGeneratorPageStatusResponse, parse_obj_as(type_=QrCodeGeneratorPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncAiArtQrCodeClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def art_qr_code( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - qr_code_data: typing.Optional[str] = OMIT, - qr_code_input_image: typing.Optional[str] = OMIT, - qr_code_vcard: typing.Optional[Vcard] = OMIT, - qr_code_file: typing.Optional[str] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - image_prompt: typing.Optional[str] = OMIT, - image_prompt_controlnet_models: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] - ] = OMIT, - image_prompt_strength: typing.Optional[float] = OMIT, - image_prompt_scale: typing.Optional[float] = OMIT, - image_prompt_pos_x: typing.Optional[float] = OMIT, - image_prompt_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] - ] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT, - seed: typing.Optional[int] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - 
obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> QrCodeGeneratorPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - qr_code_data : typing.Optional[str] - - qr_code_input_image : typing.Optional[str] - - qr_code_vcard : typing.Optional[Vcard] - - qr_code_file : typing.Optional[str] - - use_url_shortener : typing.Optional[bool] - - negative_prompt : typing.Optional[str] - - image_prompt : typing.Optional[str] - - image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]] - - image_prompt_strength : typing.Optional[float] - - image_prompt_scale : typing.Optional[float] - - image_prompt_pos_x : typing.Optional[float] - - image_prompt_pos_y : typing.Optional[float] - - selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler] - - seed : typing.Optional[int] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - QrCodeGeneratorPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_art_qr_code.art_qr_code( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/art-qr-code/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_input_image": qr_code_input_image, - "qr_code_vcard": qr_code_vcard, - "qr_code_file": qr_code_file, - "use_url_shortener": use_url_shortener, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": image_prompt_pos_y, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "num_outputs": num_outputs, - "quality": quality, - "scheduler": scheduler, - "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(QrCodeGeneratorPageResponse, parse_obj_as(type_=QrCodeGeneratorPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise 
UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_art_qr_code( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - qr_code_data: typing.Optional[str] = OMIT, - qr_code_input_image: typing.Optional[str] = OMIT, - qr_code_vcard: typing.Optional[Vcard] = OMIT, - qr_code_file: typing.Optional[str] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - image_prompt: typing.Optional[str] = OMIT, - image_prompt_controlnet_models: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] - ] = OMIT, - image_prompt_strength: typing.Optional[float] = OMIT, - image_prompt_scale: typing.Optional[float] = OMIT, - image_prompt_pos_x: typing.Optional[float] = OMIT, - image_prompt_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] - ] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - 
controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT, - seed: typing.Optional[int] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - qr_code_data : typing.Optional[str] - - qr_code_input_image : typing.Optional[str] - - qr_code_vcard : typing.Optional[Vcard] - - qr_code_file : typing.Optional[str] - - use_url_shortener : typing.Optional[bool] - - negative_prompt : typing.Optional[str] - - image_prompt : typing.Optional[str] - - image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]] - - image_prompt_strength : typing.Optional[float] - - image_prompt_scale : typing.Optional[float] - - image_prompt_pos_x : typing.Optional[float] - - image_prompt_pos_y : typing.Optional[float] - - selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler] - - seed : typing.Optional[int] 
- - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_art_qr_code.async_art_qr_code( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_input_image": qr_code_input_image, - "qr_code_vcard": qr_code_vcard, - "qr_code_file": qr_code_file, - "use_url_shortener": use_url_shortener, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": image_prompt_pos_y, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "num_outputs": num_outputs, - "quality": quality, - "scheduler": scheduler, - "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, 
object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_art_qr_code( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> QrCodeGeneratorPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - QrCodeGeneratorPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_art_qr_code.status_art_qr_code( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/art-qr-code/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(QrCodeGeneratorPageStatusResponse, parse_obj_as(type_=QrCodeGeneratorPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/ai_background_changer/__init__.py b/src/gooey/ai_background_changer/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/ai_background_changer/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/ai_background_changer/client.py b/src/gooey/ai_background_changer/client.py deleted file mode 100644 index 889e83f..0000000 --- a/src/gooey/ai_background_changer/client.py +++ /dev/null @@ -1,573 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel -from ..types.image_segmentation_page_response import ImageSegmentationPageResponse -from ..types.image_segmentation_page_status_response import ImageSegmentationPageStatusResponse -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class AiBackgroundChangerClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def image_segmentation( - self, - *, - input_image: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - rect_persepective_transform: typing.Optional[bool] = OMIT, - reflection_opacity: typing.Optional[float] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> ImageSegmentationPageResponse: - """ - Parameters - ---------- - input_image : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] - - mask_threshold : typing.Optional[float] - - rect_persepective_transform : typing.Optional[bool] - - reflection_opacity : typing.Optional[float] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ImageSegmentationPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_background_changer.image_segmentation( - input_image="input_image", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/ImageSegmentation/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ImageSegmentationPageResponse, parse_obj_as(type_=ImageSegmentationPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_image_segmentation( 
- self, - *, - input_image: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - rect_persepective_transform: typing.Optional[bool] = OMIT, - reflection_opacity: typing.Optional[float] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - input_image : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] - - mask_threshold : typing.Optional[float] - - rect_persepective_transform : typing.Optional[bool] - - reflection_opacity : typing.Optional[float] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_background_changer.async_image_segmentation( - input_image="input_image", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_image_segmentation( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ImageSegmentationPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options 
: typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ImageSegmentationPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_background_changer.status_image_segmentation( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ImageSegmentationPageStatusResponse, parse_obj_as(type_=ImageSegmentationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncAiBackgroundChangerClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def image_segmentation( - self, - *, - input_image: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - 
rect_persepective_transform: typing.Optional[bool] = OMIT, - reflection_opacity: typing.Optional[float] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> ImageSegmentationPageResponse: - """ - Parameters - ---------- - input_image : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] - - mask_threshold : typing.Optional[float] - - rect_persepective_transform : typing.Optional[bool] - - reflection_opacity : typing.Optional[float] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ImageSegmentationPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_background_changer.image_segmentation( - input_image="input_image", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/ImageSegmentation/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ImageSegmentationPageResponse, parse_obj_as(type_=ImageSegmentationPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) - - async def async_image_segmentation( - self, - *, - input_image: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - rect_persepective_transform: typing.Optional[bool] = OMIT, - reflection_opacity: typing.Optional[float] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - input_image : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] - - mask_threshold : typing.Optional[float] - - rect_persepective_transform : typing.Optional[bool] - - reflection_opacity : typing.Optional[float] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_background_changer.async_image_segmentation( - input_image="input_image", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_image_segmentation( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> 
ImageSegmentationPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ImageSegmentationPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_background_changer.status_image_segmentation( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ImageSegmentationPageStatusResponse, parse_obj_as(type_=ImageSegmentationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py b/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py +++ /dev/null @@ -1,2 
+0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py b/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py deleted file mode 100644 index b2f5a86..0000000 --- a/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py +++ /dev/null @@ -1,817 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel -from ..types.email_face_inpainting_page_response import EmailFaceInpaintingPageResponse -from ..types.email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class AiGeneratedPhotoFromEmailProfileLookupClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def email_face_inpainting( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - email_address: typing.Optional[str] = OMIT, - twitter_handle: typing.Optional[str] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - should_send_email: typing.Optional[bool] = OMIT, - email_from: typing.Optional[str] = OMIT, - email_cc: typing.Optional[str] = OMIT, - email_bcc: typing.Optional[str] = OMIT, - email_subject: typing.Optional[str] = OMIT, - email_body: typing.Optional[str] = OMIT, - email_body_enable_html: typing.Optional[bool] = OMIT, - fallback_email_body: typing.Optional[str] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> EmailFaceInpaintingPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - email_address : typing.Optional[str] - - twitter_handle : typing.Optional[str] - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : 
typing.Optional[float] - - selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - should_send_email : typing.Optional[bool] - - email_from : typing.Optional[str] - - email_cc : typing.Optional[str] - - email_bcc : typing.Optional[str] - - email_subject : typing.Optional[str] - - email_body : typing.Optional[str] - - email_body_enable_html : typing.Optional[bool] - - fallback_email_body : typing.Optional[str] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EmailFaceInpaintingPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_generated_photo_from_email_profile_lookup.email_face_inpainting( - email_address="sean@dara.network", - text_prompt="winter's day in paris", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/EmailFaceInpainting/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, 
- "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(EmailFaceInpaintingPageResponse, parse_obj_as(type_=EmailFaceInpaintingPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_email_face_inpainting( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - email_address: typing.Optional[str] = OMIT, - twitter_handle: typing.Optional[str] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] 
= OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - should_send_email: typing.Optional[bool] = OMIT, - email_from: typing.Optional[str] = OMIT, - email_cc: typing.Optional[str] = OMIT, - email_bcc: typing.Optional[str] = OMIT, - email_subject: typing.Optional[str] = OMIT, - email_body: typing.Optional[str] = OMIT, - email_body_enable_html: typing.Optional[bool] = OMIT, - fallback_email_body: typing.Optional[str] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - email_address : typing.Optional[str] - - twitter_handle : typing.Optional[str] - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - should_send_email : typing.Optional[bool] - - email_from : typing.Optional[str] - - email_cc : typing.Optional[str] - - email_bcc : typing.Optional[str] - - email_subject : typing.Optional[str] - - email_body : typing.Optional[str] - - email_body_enable_html : typing.Optional[bool] - - fallback_email_body : typing.Optional[str] - - seed : typing.Optional[int] - - settings : 
typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_generated_photo_from_email_profile_lookup.async_email_face_inpainting( - email_address="sean@dara.network", - text_prompt="winter's day in paris", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, - "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_email_face_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> EmailFaceInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EmailFaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(EmailFaceInpaintingPageStatusResponse, parse_obj_as(type_=EmailFaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) 
- _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncAiGeneratedPhotoFromEmailProfileLookupClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def email_face_inpainting( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - email_address: typing.Optional[str] = OMIT, - twitter_handle: typing.Optional[str] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - should_send_email: typing.Optional[bool] = OMIT, - email_from: typing.Optional[str] = OMIT, - email_cc: typing.Optional[str] = OMIT, - email_bcc: typing.Optional[str] = OMIT, - email_subject: typing.Optional[str] = OMIT, - email_body: typing.Optional[str] = OMIT, - email_body_enable_html: typing.Optional[bool] = OMIT, - fallback_email_body: typing.Optional[str] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> EmailFaceInpaintingPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates 
and in functions as arguments - - email_address : typing.Optional[str] - - twitter_handle : typing.Optional[str] - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - should_send_email : typing.Optional[bool] - - email_from : typing.Optional[str] - - email_cc : typing.Optional[str] - - email_bcc : typing.Optional[str] - - email_subject : typing.Optional[str] - - email_body : typing.Optional[str] - - email_body_enable_html : typing.Optional[bool] - - fallback_email_body : typing.Optional[str] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EmailFaceInpaintingPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_generated_photo_from_email_profile_lookup.email_face_inpainting( - email_address="sean@dara.network", - text_prompt="winter's day in paris", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/EmailFaceInpainting/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, - "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(EmailFaceInpaintingPageResponse, parse_obj_as(type_=EmailFaceInpaintingPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_email_face_inpainting( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - email_address: typing.Optional[str] = OMIT, - twitter_handle: typing.Optional[str] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - should_send_email: typing.Optional[bool] = OMIT, - email_from: typing.Optional[str] = OMIT, - email_cc: typing.Optional[str] = OMIT, - email_bcc: typing.Optional[str] = OMIT, - email_subject: typing.Optional[str] = OMIT, - email_body: typing.Optional[str] = OMIT, - email_body_enable_html: typing.Optional[bool] = OMIT, - fallback_email_body: typing.Optional[str] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) 
-> AsyncApiResponseModelV3: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - email_address : typing.Optional[str] - - twitter_handle : typing.Optional[str] - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - should_send_email : typing.Optional[bool] - - email_from : typing.Optional[str] - - email_cc : typing.Optional[str] - - email_bcc : typing.Optional[str] - - email_subject : typing.Optional[str] - - email_body : typing.Optional[str] - - email_body_enable_html : typing.Optional[bool] - - fallback_email_body : typing.Optional[str] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_generated_photo_from_email_profile_lookup.async_email_face_inpainting( - email_address="sean@dara.network", - text_prompt="winter's day in paris", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, - "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: 
ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_email_face_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> EmailFaceInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EmailFaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(EmailFaceInpaintingPageStatusResponse, parse_obj_as(type_=EmailFaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/ai_image_with_a_face/__init__.py b/src/gooey/ai_image_with_a_face/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/ai_image_with_a_face/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/ai_image_with_a_face/client.py b/src/gooey/ai_image_with_a_face/client.py deleted file mode 100644 index 3351943..0000000 --- a/src/gooey/ai_image_with_a_face/client.py +++ /dev/null @@ -1,673 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel -from ..types.face_inpainting_page_response import FaceInpaintingPageResponse -from ..types.face_inpainting_page_status_response import FaceInpaintingPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function 
import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class AiImageWithAFaceClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def face_inpainting( - self, - *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> FaceInpaintingPageResponse: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - 
- seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FaceInpaintingPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_image_with_a_face.face_inpainting( - input_image="input_image", - text_prompt="tony stark from the iron man", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/FaceInpainting/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(FaceInpaintingPageResponse, parse_obj_as(type_=FaceInpaintingPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - 
typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_face_inpainting( - self, - *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - 
guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_image_with_a_face.async_face_inpainting( - input_image="input_image", - text_prompt="tony stark from the iron man", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except 
JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_face_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> FaceInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.ai_image_with_a_face.status_face_inpainting( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(FaceInpaintingPageStatusResponse, parse_obj_as(type_=FaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncAiImageWithAFaceClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = 
client_wrapper - - async def face_inpainting( - self, - *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> FaceInpaintingPageResponse: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - FaceInpaintingPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_image_with_a_face.face_inpainting( - input_image="input_image", - text_prompt="tony stark from the iron man", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/FaceInpainting/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(FaceInpaintingPageResponse, parse_obj_as(type_=FaceInpaintingPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, 
object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_face_inpainting( - self, - *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - 
settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_image_with_a_face.async_face_inpainting( - input_image="input_image", - text_prompt="tony stark from the iron man", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = 
_response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_face_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> FaceInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_image_with_a_face.status_face_inpainting( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(FaceInpaintingPageStatusResponse, parse_obj_as(type_=FaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) 
diff --git a/src/gooey/bulk_runner/client.py b/src/gooey/bulk_runner/client.py index 6facdfa..0c7faa8 100644 --- a/src/gooey/bulk_runner/client.py +++ b/src/gooey/bulk_runner/client.py @@ -5,279 +5,39 @@ from ..core.api_error import ApiError from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.bulk_runner_page_response import BulkRunnerPageResponse -from ..types.bulk_runner_page_status_response import BulkRunnerPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
class BulkRunnerClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def post( - self, - *, - documents: typing.Sequence[str], - run_urls: typing.Sequence[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> BulkRunnerPageResponse: + def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ Parameters ---------- - documents : typing.Sequence[str] - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - run_urls : typing.Sequence[str] - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - - input_columns : typing.Dict[str, str] - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - - output_columns : typing.Dict[str, str] - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_urls : typing.Optional[typing.Sequence[str]] - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. 
- - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - BulkRunnerPageResponse - Successful Response + None Examples -------- from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) - client.bulk_runner.post( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, - ) + client.bulk_runner.post() """ _response = self._client_wrapper.httpx_client.request( - "v2/bulk-runner/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, + "v2/bulk-runner/", method="POST", request_options=request_options ) try: if 200 <= _response.status_code < 300: - return typing.cast(BulkRunnerPageResponse, parse_obj_as(type_=BulkRunnerPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - 
raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_bulk_runner( - self, - *, - documents: typing.Sequence[str], - run_urls: typing.Sequence[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - documents : typing.Sequence[str] - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - run_urls : typing.Sequence[str] - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - - input_columns : typing.Dict[str, str] - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - - output_columns : typing.Dict[str, str] - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_urls : typing.Optional[typing.Sequence[str]] - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.bulk_runner.async_bulk_runner( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_bulk_runner( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> BulkRunnerPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - BulkRunnerPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.bulk_runner.status_bulk_runner( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-runner/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(BulkRunnerPageStatusResponse, parse_obj_as(type_=BulkRunnerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + return _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -288,241 +48,16 @@ class AsyncBulkRunnerClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def post( - self, - *, - documents: typing.Sequence[str], - run_urls: typing.Sequence[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> BulkRunnerPageResponse: - """ - 
Parameters - ---------- - documents : typing.Sequence[str] - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - run_urls : typing.Sequence[str] - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - - input_columns : typing.Dict[str, str] - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - - output_columns : typing.Dict[str, str] - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_urls : typing.Optional[typing.Sequence[str]] - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - BulkRunnerPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.bulk_runner.post( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/bulk-runner/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(BulkRunnerPageResponse, parse_obj_as(type_=BulkRunnerPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_bulk_runner( 
- self, - *, - documents: typing.Sequence[str], - run_urls: typing.Sequence[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: + async def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ Parameters ---------- - documents : typing.Sequence[str] - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - run_urls : typing.Sequence[str] - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - - input_columns : typing.Dict[str, str] - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - - output_columns : typing.Dict[str, str] - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_urls : typing.Optional[typing.Sequence[str]] - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.bulk_runner.async_bulk_runner( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_bulk_runner( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> BulkRunnerPageStatusResponse: - """ - Parameters - ---------- - run_id : str 
- request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - BulkRunnerPageStatusResponse - Successful Response + None Examples -------- @@ -531,37 +66,22 @@ async def status_bulk_runner( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) async def main() -> None: - await client.bulk_runner.status_bulk_runner( - run_id="run_id", - ) + await client.bulk_runner.post() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-runner/status/", method="GET", params={"run_id": run_id}, request_options=request_options + "v2/bulk-runner/", method="POST", request_options=request_options ) try: if 200 <= _response.status_code < 300: - return typing.cast(BulkRunnerPageStatusResponse, parse_obj_as(type_=BulkRunnerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + return _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/gooey/chyron_plant_bot/__init__.py b/src/gooey/chyron_plant_bot/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/chyron_plant_bot/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/chyron_plant_bot/client.py b/src/gooey/chyron_plant_bot/client.py deleted file mode 100644 index 005f66e..0000000 --- a/src/gooey/chyron_plant_bot/client.py +++ /dev/null @@ -1,492 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.chyron_plant_page_response import ChyronPlantPageResponse -from ..types.chyron_plant_page_status_response import ChyronPlantPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class ChyronPlantBotClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def chyron_plant( - self, - *, - midi_notes: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - midi_notes_prompt: typing.Optional[str] = OMIT, - chyron_prompt: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> ChyronPlantPageResponse: - """ - Parameters - ---------- - midi_notes : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - midi_notes_prompt : typing.Optional[str] - - chyron_prompt : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ChyronPlantPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.chyron_plant_bot.chyron_plant( - midi_notes="C#1 B6 A2 A1 A3 A2", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/ChyronPlant/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "midi_notes": midi_notes, - "midi_notes_prompt": midi_notes_prompt, - "chyron_prompt": chyron_prompt, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ChyronPlantPageResponse, parse_obj_as(type_=ChyronPlantPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_chyron_plant( - self, - *, - midi_notes: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - midi_notes_prompt: typing.Optional[str] = 
OMIT, - chyron_prompt: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - midi_notes : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - midi_notes_prompt : typing.Optional[str] - - chyron_prompt : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.chyron_plant_bot.async_chyron_plant( - midi_notes="C#1 B6 A2 A1 A3 A2", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "midi_notes": midi_notes, - "midi_notes_prompt": midi_notes_prompt, - "chyron_prompt": chyron_prompt, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, 
object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_chyron_plant( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ChyronPlantPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ChyronPlantPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.chyron_plant_bot.status_chyron_plant( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ChyronPlantPageStatusResponse, parse_obj_as(type_=ChyronPlantPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncChyronPlantBotClient: - def __init__(self, *, 
client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def chyron_plant( - self, - *, - midi_notes: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - midi_notes_prompt: typing.Optional[str] = OMIT, - chyron_prompt: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> ChyronPlantPageResponse: - """ - Parameters - ---------- - midi_notes : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - midi_notes_prompt : typing.Optional[str] - - chyron_prompt : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ChyronPlantPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.chyron_plant_bot.chyron_plant( - midi_notes="C#1 B6 A2 A1 A3 A2", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/ChyronPlant/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "midi_notes": midi_notes, - "midi_notes_prompt": midi_notes_prompt, - "chyron_prompt": chyron_prompt, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ChyronPlantPageResponse, parse_obj_as(type_=ChyronPlantPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, 
parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_chyron_plant( - self, - *, - midi_notes: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - midi_notes_prompt: typing.Optional[str] = OMIT, - chyron_prompt: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - midi_notes : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - midi_notes_prompt : typing.Optional[str] - - chyron_prompt : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.chyron_plant_bot.async_chyron_plant( - midi_notes="C#1 B6 A2 A1 A3 A2", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "midi_notes": midi_notes, - "midi_notes_prompt": midi_notes_prompt, - "chyron_prompt": chyron_prompt, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_chyron_plant( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ChyronPlantPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ChyronPlantPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.chyron_plant_bot.status_chyron_plant( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ChyronPlantPageStatusResponse, parse_obj_as(type_=ChyronPlantPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/client.py b/src/gooey/client.py index ecdb38c..b21e22a 100644 --- a/src/gooey/client.py +++ b/src/gooey/client.py @@ -2,74 +2,124 @@ import os import typing +from json.decoder import JSONDecodeError import httpx -from .ai_animation_generator.client import AiAnimationGeneratorClient, AsyncAiAnimationGeneratorClient -from .ai_art_qr_code.client import AiArtQrCodeClient, AsyncAiArtQrCodeClient -from .ai_background_changer.client import AiBackgroundChangerClient, 
AsyncAiBackgroundChangerClient -from .ai_generated_photo_from_email_profile_lookup.client import ( - AiGeneratedPhotoFromEmailProfileLookupClient, - AsyncAiGeneratedPhotoFromEmailProfileLookupClient, -) -from .ai_image_with_a_face.client import AiImageWithAFaceClient, AsyncAiImageWithAFaceClient from .bulk_runner.client import AsyncBulkRunnerClient, BulkRunnerClient -from .chyron_plant_bot.client import AsyncChyronPlantBotClient, ChyronPlantBotClient -from .compare_ai_image_generators.client import AsyncCompareAiImageGeneratorsClient, CompareAiImageGeneratorsClient -from .compare_ai_image_upscalers.client import AsyncCompareAiImageUpscalersClient, CompareAiImageUpscalersClient -from .compare_ai_translations.client import AsyncCompareAiTranslationsClient, CompareAiTranslationsClient -from .compare_ai_voice_generators.client import AsyncCompareAiVoiceGeneratorsClient, CompareAiVoiceGeneratorsClient from .copilot_for_your_enterprise.client import AsyncCopilotForYourEnterpriseClient, CopilotForYourEnterpriseClient from .copilot_integrations.client import AsyncCopilotIntegrationsClient, CopilotIntegrationsClient from .core.api_error import ApiError from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from .create_a_perfect_seo_optimized_title_paragraph.client import ( - AsyncCreateAPerfectSeoOptimizedTitleParagraphClient, - CreateAPerfectSeoOptimizedTitleParagraphClient, -) -from .edit_an_image_with_ai_prompt.client import AsyncEditAnImageWithAiPromptClient, EditAnImageWithAiPromptClient +from .core.pydantic_utilities import parse_obj_as +from .core.request_options import RequestOptions from .embeddings.client import AsyncEmbeddingsClient, EmbeddingsClient from .environment import GooeyEnvironment +from .errors.payment_required_error import PaymentRequiredError +from .errors.too_many_requests_error import TooManyRequestsError +from .errors.unprocessable_entity_error import UnprocessableEntityError from .evaluator.client import AsyncEvaluatorClient, 
EvaluatorClient from .functions.client import AsyncFunctionsClient, FunctionsClient -from .generate_people_also_ask_seo_content.client import ( - AsyncGeneratePeopleAlsoAskSeoContentClient, - GeneratePeopleAlsoAskSeoContentClient, -) -from .generate_product_photo_backgrounds.client import ( - AsyncGenerateProductPhotoBackgroundsClient, - GenerateProductPhotoBackgroundsClient, -) -from .large_language_models_gpt3.client import AsyncLargeLanguageModelsGpt3Client, LargeLanguageModelsGpt3Client -from .letter_writer.client import AsyncLetterWriterClient, LetterWriterClient from .lip_syncing.client import AsyncLipSyncingClient, LipSyncingClient -from .lipsync_video_with_any_text.client import AsyncLipsyncVideoWithAnyTextClient, LipsyncVideoWithAnyTextClient from .misc.client import AsyncMiscClient, MiscClient -from .people_also_ask_answers_from_a_doc.client import ( - AsyncPeopleAlsoAskAnswersFromADocClient, - PeopleAlsoAskAnswersFromADocClient, -) -from .profile_lookup_gpt3for_ai_personalized_emails.client import ( - AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient, - ProfileLookupGpt3ForAiPersonalizedEmailsClient, -) -from .render_image_search_results_with_ai.client import ( - AsyncRenderImageSearchResultsWithAiClient, - RenderImageSearchResultsWithAiClient, -) -from .search_your_docs_with_gpt.client import AsyncSearchYourDocsWithGptClient, SearchYourDocsWithGptClient from .smart_gpt.client import AsyncSmartGptClient, SmartGptClient -from .speech_recognition_translation.client import ( - AsyncSpeechRecognitionTranslationClient, - SpeechRecognitionTranslationClient, +from .types.animation_prompt import AnimationPrompt +from .types.asr_page_request_output_format import AsrPageRequestOutputFormat +from .types.asr_page_request_selected_model import AsrPageRequestSelectedModel +from .types.asr_page_request_translation_model import AsrPageRequestTranslationModel +from .types.asr_page_response import AsrPageResponse +from .types.bulk_runner_page_response import 
BulkRunnerPageResponse +from .types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType +from .types.compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem +from .types.compare_llm_page_response import CompareLlmPageResponse +from .types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler +from .types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem +from .types.compare_text2img_page_response import CompareText2ImgPageResponse +from .types.compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem +from .types.compare_upscaler_page_response import CompareUpscalerPageResponse +from .types.deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel +from .types.deforum_sd_page_response import DeforumSdPageResponse +from .types.doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType +from .types.doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel +from .types.doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel +from .types.doc_extract_page_response import DocExtractPageResponse +from .types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle +from .types.doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel +from .types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery +from .types.doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType +from .types.doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel +from .types.doc_search_page_response import DocSearchPageResponse +from .types.doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType +from 
.types.doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel +from .types.doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel +from .types.doc_summary_page_response import DocSummaryPageResponse +from .types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel +from .types.email_face_inpainting_page_response import EmailFaceInpaintingPageResponse +from .types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel +from .types.embeddings_page_response import EmbeddingsPageResponse +from .types.face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel +from .types.face_inpainting_page_response import FaceInpaintingPageResponse +from .types.generic_error_response import GenericErrorResponse +from .types.google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel +from .types.google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType +from .types.google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel +from .types.google_gpt_page_response import GoogleGptPageResponse +from .types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel +from .types.google_image_gen_page_response import GoogleImageGenPageResponse +from .types.http_validation_error import HttpValidationError +from .types.image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel +from .types.image_segmentation_page_response import ImageSegmentationPageResponse +from .types.img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel +from .types.img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel +from .types.img2img_page_response import Img2ImgPageResponse +from .types.lipsync_tts_page_request_openai_tts_model import 
LipsyncTtsPageRequestOpenaiTtsModel +from .types.lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName +from .types.lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel +from .types.lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider +from .types.lipsync_tts_page_response import LipsyncTtsPageResponse +from .types.object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel +from .types.object_inpainting_page_response import ObjectInpaintingPageResponse +from .types.qr_code_generator_page_request_image_prompt_controlnet_models_item import ( + QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, ) -from .summarize_your_docs_with_gpt.client import AsyncSummarizeYourDocsWithGptClient, SummarizeYourDocsWithGptClient -from .synthetic_data_maker_for_videos_pd_fs.client import ( - AsyncSyntheticDataMakerForVideosPdFsClient, - SyntheticDataMakerForVideosPdFsClient, +from .types.qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler +from .types.qr_code_generator_page_request_selected_controlnet_model_item import ( + QrCodeGeneratorPageRequestSelectedControlnetModelItem, ) -from .text_guided_audio_generator.client import AsyncTextGuidedAudioGeneratorClient, TextGuidedAudioGeneratorClient -from .web_search_gpt3.client import AsyncWebSearchGpt3Client, WebSearchGpt3Client +from .types.qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel +from .types.qr_code_generator_page_response import QrCodeGeneratorPageResponse +from .types.recipe_function import RecipeFunction +from .types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle +from .types.related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel +from .types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery +from 
.types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType +from .types.related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel +from .types.related_qn_a_doc_page_response import RelatedQnADocPageResponse +from .types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel +from .types.related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType +from .types.related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel +from .types.related_qn_a_page_response import RelatedQnAPageResponse +from .types.run_settings import RunSettings +from .types.sad_talker_settings import SadTalkerSettings +from .types.seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType +from .types.seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel +from .types.seo_summary_page_response import SeoSummaryPageResponse +from .types.serp_search_location import SerpSearchLocation +from .types.serp_search_type import SerpSearchType +from .types.social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType +from .types.social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel +from .types.social_lookup_email_page_response import SocialLookupEmailPageResponse +from .types.text2audio_page_response import Text2AudioPageResponse +from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel +from .types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName +from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider +from .types.text_to_speech_page_response import TextToSpeechPageResponse +from .types.translation_page_request_selected_model import 
TranslationPageRequestSelectedModel +from .types.translation_page_response import TranslationPageResponse +from .types.vcard import Vcard + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) class Gooey: @@ -90,7 +140,6 @@ class Gooey: - authorization : typing.Optional[str] api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]] timeout : typing.Optional[float] The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. @@ -106,7 +155,6 @@ class Gooey: from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) """ @@ -116,7 +164,6 @@ def __init__( *, base_url: typing.Optional[str] = None, environment: GooeyEnvironment = GooeyEnvironment.DEFAULT, - authorization: typing.Optional[str] = None, api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"), timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, @@ -127,7 +174,6 @@ def __init__( raise ApiError(body="The client must be instantiated be either passing in api_key or setting GOOEY_API_KEY") self._client_wrapper = SyncClientWrapper( base_url=_get_base_url(base_url=base_url, environment=environment), - authorization=authorization, api_key=api_key, httpx_client=httpx_client if httpx_client is not None @@ -138,174 +184,7168 @@ def __init__( ) self.copilot_integrations = CopilotIntegrationsClient(client_wrapper=self._client_wrapper) self.copilot_for_your_enterprise = CopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) - self.ai_animation_generator = AiAnimationGeneratorClient(client_wrapper=self._client_wrapper) - self.ai_art_qr_code = AiArtQrCodeClient(client_wrapper=self._client_wrapper) - self.generate_people_also_ask_seo_content = GeneratePeopleAlsoAskSeoContentClient( - client_wrapper=self._client_wrapper - ) - 
self.create_a_perfect_seo_optimized_title_paragraph = CreateAPerfectSeoOptimizedTitleParagraphClient( - client_wrapper=self._client_wrapper - ) - self.web_search_gpt3 = WebSearchGpt3Client(client_wrapper=self._client_wrapper) - self.profile_lookup_gpt3for_ai_personalized_emails = ProfileLookupGpt3ForAiPersonalizedEmailsClient( - client_wrapper=self._client_wrapper - ) - self.bulk_runner = BulkRunnerClient(client_wrapper=self._client_wrapper) self.evaluator = EvaluatorClient(client_wrapper=self._client_wrapper) - self.synthetic_data_maker_for_videos_pd_fs = SyntheticDataMakerForVideosPdFsClient( - client_wrapper=self._client_wrapper - ) - self.large_language_models_gpt3 = LargeLanguageModelsGpt3Client(client_wrapper=self._client_wrapper) - self.search_your_docs_with_gpt = SearchYourDocsWithGptClient(client_wrapper=self._client_wrapper) self.smart_gpt = SmartGptClient(client_wrapper=self._client_wrapper) - self.summarize_your_docs_with_gpt = SummarizeYourDocsWithGptClient(client_wrapper=self._client_wrapper) self.functions = FunctionsClient(client_wrapper=self._client_wrapper) self.lip_syncing = LipSyncingClient(client_wrapper=self._client_wrapper) - self.lipsync_video_with_any_text = LipsyncVideoWithAnyTextClient(client_wrapper=self._client_wrapper) - self.compare_ai_voice_generators = CompareAiVoiceGeneratorsClient(client_wrapper=self._client_wrapper) - self.speech_recognition_translation = SpeechRecognitionTranslationClient(client_wrapper=self._client_wrapper) - self.text_guided_audio_generator = TextGuidedAudioGeneratorClient(client_wrapper=self._client_wrapper) - self.compare_ai_translations = CompareAiTranslationsClient(client_wrapper=self._client_wrapper) - self.edit_an_image_with_ai_prompt = EditAnImageWithAiPromptClient(client_wrapper=self._client_wrapper) - self.compare_ai_image_generators = CompareAiImageGeneratorsClient(client_wrapper=self._client_wrapper) - self.generate_product_photo_backgrounds = GenerateProductPhotoBackgroundsClient( - 
client_wrapper=self._client_wrapper - ) - self.ai_image_with_a_face = AiImageWithAFaceClient(client_wrapper=self._client_wrapper) - self.ai_generated_photo_from_email_profile_lookup = AiGeneratedPhotoFromEmailProfileLookupClient( - client_wrapper=self._client_wrapper - ) - self.render_image_search_results_with_ai = RenderImageSearchResultsWithAiClient( - client_wrapper=self._client_wrapper - ) - self.ai_background_changer = AiBackgroundChangerClient(client_wrapper=self._client_wrapper) - self.compare_ai_image_upscalers = CompareAiImageUpscalersClient(client_wrapper=self._client_wrapper) - self.chyron_plant_bot = ChyronPlantBotClient(client_wrapper=self._client_wrapper) - self.letter_writer = LetterWriterClient(client_wrapper=self._client_wrapper) + self.misc = MiscClient(client_wrapper=self._client_wrapper) + self.bulk_runner = BulkRunnerClient(client_wrapper=self._client_wrapper) self.embeddings = EmbeddingsClient(client_wrapper=self._client_wrapper) - self.people_also_ask_answers_from_a_doc = PeopleAlsoAskAnswersFromADocClient( - client_wrapper=self._client_wrapper + + def animate( + self, + *, + animation_prompts: typing.Sequence[AnimationPrompt], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + max_frames: typing.Optional[int] = OMIT, + selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, + animation_mode: typing.Optional[str] = OMIT, + zoom: typing.Optional[str] = OMIT, + translation_x: typing.Optional[str] = OMIT, + translation_y: typing.Optional[str] = OMIT, + rotation3d_x: typing.Optional[str] = OMIT, + rotation3d_y: typing.Optional[str] = OMIT, + rotation3d_z: typing.Optional[str] = OMIT, + fps: typing.Optional[int] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> DeforumSdPageResponse: + """ + 
Parameters + ---------- + animation_prompts : typing.Sequence[AnimationPrompt] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + max_frames : typing.Optional[int] + + selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] + + animation_mode : typing.Optional[str] + + zoom : typing.Optional[str] + + translation_x : typing.Optional[str] + + translation_y : typing.Optional[str] + + rotation3d_x : typing.Optional[str] + + rotation3d_y : typing.Optional[str] + + rotation3d_z : typing.Optional[str] + + fps : typing.Optional[int] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DeforumSdPageResponse + Successful Response + + Examples + -------- + from gooey import AnimationPrompt, Gooey + + client = Gooey( + api_key="YOUR_API_KEY", ) - self.misc = MiscClient(client_wrapper=self._client_wrapper) + client.animate( + animation_prompts=[ + AnimationPrompt( + frame="frame", + prompt="prompt", + ) + ], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/DeforumSD/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "animation_prompts": animation_prompts, + "max_frames": max_frames, + "selected_model": selected_model, + "animation_mode": animation_mode, + "zoom": zoom, + "translation_x": translation_x, + "translation_y": translation_y, + "rotation_3d_x": rotation3d_x, + "rotation_3d_y": rotation3d_y, + "rotation_3d_z": rotation3d_z, + "fps": fps, + "seed": seed, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(DeforumSdPageResponse, 
parse_obj_as(type_=DeforumSdPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def qr_code( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + qr_code_data: typing.Optional[str] = OMIT, + qr_code_input_image: typing.Optional[str] = OMIT, + qr_code_vcard: typing.Optional[Vcard] = OMIT, + qr_code_file: typing.Optional[str] = OMIT, + use_url_shortener: typing.Optional[bool] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + image_prompt: typing.Optional[str] = OMIT, + image_prompt_controlnet_models: typing.Optional[ + typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] + ] = OMIT, + image_prompt_strength: typing.Optional[float] = OMIT, + image_prompt_scale: typing.Optional[float] = OMIT, + image_prompt_pos_x: typing.Optional[float] = OMIT, + image_prompt_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT, + selected_controlnet_model: typing.Optional[ + typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] + ] = OMIT, + output_width: 
typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT, + seed: typing.Optional[int] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> QrCodeGeneratorPageResponse: + """ + Parameters + ---------- + text_prompt : str -class AsyncGooey: - """ - Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. + example_id : typing.Optional[str] - Parameters - ---------- - base_url : typing.Optional[str] - The base url to use for requests from the client. + functions : typing.Optional[typing.Sequence[RecipeFunction]] - environment : GooeyEnvironment - The environment to use for requests from the client. from .environment import GooeyEnvironment + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + qr_code_data : typing.Optional[str] + qr_code_input_image : typing.Optional[str] - Defaults to GooeyEnvironment.DEFAULT + qr_code_vcard : typing.Optional[Vcard] + qr_code_file : typing.Optional[str] + use_url_shortener : typing.Optional[bool] - authorization : typing.Optional[str] - api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]] - timeout : typing.Optional[float] - The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. 
+ negative_prompt : typing.Optional[str] - follow_redirects : typing.Optional[bool] - Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. + image_prompt : typing.Optional[str] - httpx_client : typing.Optional[httpx.AsyncClient] - The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. + image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]] - Examples - -------- - from gooey import AsyncGooey + image_prompt_strength : typing.Optional[float] - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - """ + image_prompt_scale : typing.Optional[float] - def __init__( + image_prompt_pos_x : typing.Optional[float] + + image_prompt_pos_y : typing.Optional[float] + + selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel] + + selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler] + + seed : typing.Optional[int] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + QrCodeGeneratorPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.qr_code( + text_prompt="text_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/art-qr-code/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "qr_code_data": qr_code_data, + "qr_code_input_image": qr_code_input_image, + "qr_code_vcard": qr_code_vcard, + "qr_code_file": qr_code_file, + "use_url_shortener": use_url_shortener, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "image_prompt": image_prompt, + "image_prompt_controlnet_models": image_prompt_controlnet_models, + "image_prompt_strength": image_prompt_strength, + "image_prompt_scale": image_prompt_scale, + "image_prompt_pos_x": image_prompt_pos_x, + "image_prompt_pos_y": image_prompt_pos_y, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "num_outputs": num_outputs, + "quality": quality, + "scheduler": scheduler, + "seed": seed, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(QrCodeGeneratorPageResponse, parse_obj_as(type_=QrCodeGeneratorPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def seo_people_also_ask( self, *, - base_url: typing.Optional[str] = None, - environment: GooeyEnvironment = GooeyEnvironment.DEFAULT, - authorization: typing.Optional[str] = None, - api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"), - timeout: typing.Optional[float] = None, - follow_redirects: typing.Optional[bool] = True, - httpx_client: typing.Optional[httpx.AsyncClient] = None - ): - _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None - if api_key is None: - raise ApiError(body="The client must be instantiated be either passing in api_key or setting GOOEY_API_KEY") - self._client_wrapper = AsyncClientWrapper( - base_url=_get_base_url(base_url=base_url, environment=environment), - authorization=authorization, - api_key=api_key, - httpx_client=httpx_client - if httpx_client is not None - else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) - if follow_redirects is not None - else httpx.AsyncClient(timeout=_defaulted_timeout), - timeout=_defaulted_timeout, + search_query: str, + site_filter: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + 
max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> RelatedQnAPageResponse: + """ + Parameters + ---------- + search_query : str + + site_filter : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + RelatedQnAPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", ) - self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper) - self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) - self.ai_animation_generator = AsyncAiAnimationGeneratorClient(client_wrapper=self._client_wrapper) - self.ai_art_qr_code = AsyncAiArtQrCodeClient(client_wrapper=self._client_wrapper) - self.generate_people_also_ask_seo_content = AsyncGeneratePeopleAlsoAskSeoContentClient( - client_wrapper=self._client_wrapper + client.seo_people_also_ask( + search_query="search_query", + site_filter="site_filter", ) - self.create_a_perfect_seo_optimized_title_paragraph = AsyncCreateAPerfectSeoOptimizedTitleParagraphClient( - client_wrapper=self._client_wrapper + """ + _response = self._client_wrapper.httpx_client.request( + "v3/related-qna-maker/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + 
"query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) - self.web_search_gpt3 = AsyncWebSearchGpt3Client(client_wrapper=self._client_wrapper) - self.profile_lookup_gpt3for_ai_personalized_emails = AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient( - client_wrapper=self._client_wrapper + try: + if 200 <= _response.status_code < 300: + return typing.cast(RelatedQnAPageResponse, parse_obj_as(type_=RelatedQnAPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def seo_content( + self, + *, + search_query: str, + 
keywords: str, + title: str, + company_url: str, + example_id: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = OMIT, + enable_html: typing.Optional[bool] = OMIT, + selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + enable_crosslinks: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> SeoSummaryPageResponse: + """ + Parameters + ---------- + search_query : str + + keywords : str + + title : str + + company_url : str + + example_id : typing.Optional[str] + + task_instructions : typing.Optional[str] + + enable_html : typing.Optional[bool] + + selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + enable_crosslinks : typing.Optional[bool] + + seed : typing.Optional[int] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use 
`serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SeoSummaryPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", ) - self.bulk_runner = AsyncBulkRunnerClient(client_wrapper=self._client_wrapper) - self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper) - self.synthetic_data_maker_for_videos_pd_fs = AsyncSyntheticDataMakerForVideosPdFsClient( - client_wrapper=self._client_wrapper + client.seo_content( + search_query="search_query", + keywords="keywords", + title="title", + company_url="company_url", ) - self.large_language_models_gpt3 = AsyncLargeLanguageModelsGpt3Client(client_wrapper=self._client_wrapper) - self.search_your_docs_with_gpt = AsyncSearchYourDocsWithGptClient(client_wrapper=self._client_wrapper) - self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper) - self.summarize_your_docs_with_gpt = AsyncSummarizeYourDocsWithGptClient(client_wrapper=self._client_wrapper) - self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper) - self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper) - self.lipsync_video_with_any_text = AsyncLipsyncVideoWithAnyTextClient(client_wrapper=self._client_wrapper) - self.compare_ai_voice_generators = AsyncCompareAiVoiceGeneratorsClient(client_wrapper=self._client_wrapper) - self.speech_recognition_translation = AsyncSpeechRecognitionTranslationClient( - client_wrapper=self._client_wrapper - ) - self.text_guided_audio_generator = AsyncTextGuidedAudioGeneratorClient(client_wrapper=self._client_wrapper) - self.compare_ai_translations = 
AsyncCompareAiTranslationsClient(client_wrapper=self._client_wrapper) - self.edit_an_image_with_ai_prompt = AsyncEditAnImageWithAiPromptClient(client_wrapper=self._client_wrapper) - self.compare_ai_image_generators = AsyncCompareAiImageGeneratorsClient(client_wrapper=self._client_wrapper) - self.generate_product_photo_backgrounds = AsyncGenerateProductPhotoBackgroundsClient( - client_wrapper=self._client_wrapper - ) - self.ai_image_with_a_face = AsyncAiImageWithAFaceClient(client_wrapper=self._client_wrapper) - self.ai_generated_photo_from_email_profile_lookup = AsyncAiGeneratedPhotoFromEmailProfileLookupClient( - client_wrapper=self._client_wrapper - ) - self.render_image_search_results_with_ai = AsyncRenderImageSearchResultsWithAiClient( - client_wrapper=self._client_wrapper - ) - self.ai_background_changer = AsyncAiBackgroundChangerClient(client_wrapper=self._client_wrapper) - self.compare_ai_image_upscalers = AsyncCompareAiImageUpscalersClient(client_wrapper=self._client_wrapper) - self.chyron_plant_bot = AsyncChyronPlantBotClient(client_wrapper=self._client_wrapper) - self.letter_writer = AsyncLetterWriterClient(client_wrapper=self._client_wrapper) - self.embeddings = AsyncEmbeddingsClient(client_wrapper=self._client_wrapper) - self.people_also_ask_answers_from_a_doc = AsyncPeopleAlsoAskAnswersFromADocClient( - client_wrapper=self._client_wrapper + """ + _response = self._client_wrapper.httpx_client.request( + "v3/SEOSummary/async", + method="POST", + params={"example_id": example_id}, + json={ + "search_query": search_query, + "keywords": keywords, + "title": title, + "company_url": company_url, + "task_instructions": task_instructions, + "enable_html": enable_html, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "enable_crosslinks": enable_crosslinks, + "seed": seed, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": 
sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) - self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper) + try: + if 200 <= _response.status_code < 300: + return typing.cast(SeoSummaryPageResponse, parse_obj_as(type_=SeoSummaryPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def web_search_llm( + self, + *, + search_query: str, + site_filter: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: 
typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> GoogleGptPageResponse: + """ + Parameters + ---------- + search_query : str + + site_filter : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GoogleGptPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.web_search_llm( + search_query="search_query", + site_filter="site_filter", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/google-gpt/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": 
scaleserp_search_field, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def personalize_email( + self, + *, + email_address: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> SocialLookupEmailPageResponse: + """ + Parameters + ---------- + email_address : str + + example_id : typing.Optional[str] + + functions : 
typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SocialLookupEmailPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.personalize_email( + email_address="email_address", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/SocialLookupEmail/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "input_prompt": input_prompt, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(SocialLookupEmailPageResponse, parse_obj_as(type_=SocialLookupEmailPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise 
UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def bulk_run( + self, + *, + documents: typing.Sequence[str], + run_urls: typing.Sequence[str], + input_columns: typing.Dict[str, str], + output_columns: typing.Dict[str, str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> BulkRunnerPageResponse: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. + Remember to includes header names in your CSV too. + + + run_urls : typing.Sequence[str] + + Provide one or more Gooey.AI workflow runs. + You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. + + + input_columns : typing.Dict[str, str] + + For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + + + output_columns : typing.Dict[str, str] + + For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. 
+ + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + eval_urls : typing.Optional[typing.Sequence[str]] + + _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + BulkRunnerPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.bulk_run( + documents=["documents"], + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/bulk-runner/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "run_urls": run_urls, + "input_columns": input_columns, + "output_columns": output_columns, + "eval_urls": eval_urls, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(BulkRunnerPageResponse, parse_obj_as(type_=BulkRunnerPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + 
) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def synthesize_data( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + sheet_url: typing.Optional[str] = OMIT, + selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> DocExtractPageResponse: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + sheet_url : typing.Optional[str] + + selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. 
Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + + task_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocExtractPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DocExtractPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.synthesize_data( + documents=["documents"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/doc-extract/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "sheet_url": sheet_url, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "glossary_document": glossary_document, + "task_instructions": task_instructions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(DocExtractPageResponse, parse_obj_as(type_=DocExtractPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, 
object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def llm( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> CompareLlmPageResponse: + """ + Parameters + ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : 
typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CompareLlmPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.llm() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/CompareLLM/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "selected_models": selected_models, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(CompareLlmPageResponse, parse_obj_as(type_=CompareLlmPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def rag( + self, + *, 
+ search_query: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> DocSearchPageResponse: + """ + Parameters + ---------- + search_query : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] + + documents : typing.Optional[typing.Sequence[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : 
typing.Optional[DocSearchPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSearchPageRequestSelectedModel] + + citation_style : typing.Optional[DocSearchPageRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DocSearchPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.rag( + search_query="search_query", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/doc-search/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, 
+ "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(DocSearchPageResponse, parse_obj_as(type_=DocSearchPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def doc_summary( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + merge_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, + selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + 
response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> DocSummaryPageResponse: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + merge_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] + + chain_type : typing.Optional[typing.Literal["map_reduce"]] + + selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + DocSummaryPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.doc_summary( + documents=["documents"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/doc-summary/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "task_instructions": task_instructions, + "merge_instructions": merge_instructions, + "selected_model": selected_model, + "chain_type": chain_type, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(DocSummaryPageResponse, parse_obj_as(type_=DocSummaryPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def lipsync_tts( + self, + *, + text_prompt: str, + example_id: 
typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, + input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, + sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> LipsyncTtsPageResponse: + """ + Parameters + ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in 
functions as arguments + + tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] + + input_face : typing.Optional[str] + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[SadTalkerSettings] + + selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + LipsyncTtsPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.lipsync_tts( + text_prompt="text_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/LipsyncTTS/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "input_face": input_face, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(LipsyncTtsPageResponse, parse_obj_as(type_=LipsyncTtsPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 
422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def text_to_speech( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> TextToSpeechPageResponse: + """ + Parameters + 
---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TextToSpeechPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.text_to_speech( + text_prompt="text_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/TextToSpeech/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(TextToSpeechPageResponse, parse_obj_as(type_=TextToSpeechPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def speech_recognition( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, + language: typing.Optional[str] = OMIT, + translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, + output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + translation_source: typing.Optional[str] = OMIT, + translation_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> AsrPageResponse: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[AsrPageRequestSelectedModel] + + language : typing.Optional[str] + + translation_model : typing.Optional[AsrPageRequestTranslationModel] + + output_format : typing.Optional[AsrPageRequestOutputFormat] + + google_translate_target : typing.Optional[str] + use `translation_model` & `translation_target` instead. 
+ + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsrPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.speech_recognition( + documents=["documents"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/asr/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "selected_model": selected_model, + "language": language, + "translation_model": translation_model, + "output_format": output_format, + "google_translate_target": google_translate_target, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(AsrPageResponse, parse_obj_as(type_=AsrPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + 
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def text_to_music( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + duration_sec: typing.Optional[float] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> Text2AudioPageResponse: + """ + Parameters + ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + duration_sec : typing.Optional[float] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Text2AudioPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.text_to_music( + text_prompt="text_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/text2audio/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "duration_sec": duration_sec, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(Text2AudioPageResponse, parse_obj_as(type_=Text2AudioPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def translate( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + texts: typing.Optional[typing.Sequence[str]] = OMIT, + 
selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, + translation_source: typing.Optional[str] = OMIT, + translation_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> TranslationPageResponse: + """ + Parameters + ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + texts : typing.Optional[typing.Sequence[str]] + + selected_model : typing.Optional[TranslationPageRequestSelectedModel] + + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TranslationPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.translate() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/translate/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(TranslationPageResponse, parse_obj_as(type_=TranslationPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remix_image( + self, + *, + input_image: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + text_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, + selected_controlnet_model: 
typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> Img2ImgPageResponse: + """ + Parameters + ---------- + input_image : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + text_prompt : typing.Optional[str] + + selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] + + selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + prompt_strength : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Img2ImgPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.remix_image( + input_image="input_image", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/Img2Img/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "text_prompt": text_prompt, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "seed": seed, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(Img2ImgPageResponse, parse_obj_as(type_=Img2ImgPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def text_to_image( + self, + *, + text_prompt: str, + 
example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + dall_e3quality: typing.Optional[str] = OMIT, + dall_e3style: typing.Optional[str] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, + edit_instruction: typing.Optional[str] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> CompareText2ImgPageResponse: + """ + Parameters + ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + dall_e3quality : typing.Optional[str] + + dall_e3style : typing.Optional[str] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] + + scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] + + edit_instruction : typing.Optional[str] + + image_guidance_scale : 
typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CompareText2ImgPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.text_to_image( + text_prompt="text_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/CompareText2Img/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "output_width": output_width, + "output_height": output_height, + "num_outputs": num_outputs, + "quality": quality, + "dall_e_3_quality": dall_e3quality, + "dall_e_3_style": dall_e3style, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "scheduler": scheduler, + "edit_instruction": edit_instruction, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(CompareText2ImgPageResponse, parse_obj_as(type_=CompareText2ImgPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise 
ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def product_image( + self, + *, + input_image: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + mask_threshold: typing.Optional[float] = OMIT, + selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> ObjectInpaintingPageResponse: + """ + Parameters + ---------- + input_image : str + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + mask_threshold : typing.Optional[float] + + selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] 
+ + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ObjectInpaintingPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.product_image( + input_image="input_image", + text_prompt="text_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/ObjectInpainting/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "text_prompt": text_prompt, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "mask_threshold": mask_threshold, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(ObjectInpaintingPageResponse, parse_obj_as(type_=ObjectInpaintingPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise 
ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def portrait( + self, + *, + input_image: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> FaceInpaintingPageResponse: + """ + Parameters + ---------- + input_image : str + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + face_scale : typing.Optional[float] + + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + + 
request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + FaceInpaintingPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.portrait( + input_image="input_image", + text_prompt="tony stark from the iron man", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/FaceInpainting/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "seed": seed, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(FaceInpaintingPageResponse, parse_obj_as(type_=FaceInpaintingPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, 
body=_response_json) + + def image_from_email( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + email_address: typing.Optional[str] = OMIT, + twitter_handle: typing.Optional[str] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + should_send_email: typing.Optional[bool] = OMIT, + email_from: typing.Optional[str] = OMIT, + email_cc: typing.Optional[str] = OMIT, + email_bcc: typing.Optional[str] = OMIT, + email_subject: typing.Optional[str] = OMIT, + email_body: typing.Optional[str] = OMIT, + email_body_enable_html: typing.Optional[bool] = OMIT, + fallback_email_body: typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> EmailFaceInpaintingPageResponse: + """ + Parameters + ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + email_address : typing.Optional[str] + + twitter_handle : typing.Optional[str] + + face_scale : typing.Optional[float] + + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : 
typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + should_send_email : typing.Optional[bool] + + email_from : typing.Optional[str] + + email_cc : typing.Optional[str] + + email_bcc : typing.Optional[str] + + email_subject : typing.Optional[str] + + email_body : typing.Optional[str] + + email_body_enable_html : typing.Optional[bool] + + fallback_email_body : typing.Optional[str] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EmailFaceInpaintingPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.image_from_email( + email_address="sean@dara.network", + text_prompt="winter's day in paris", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/EmailFaceInpainting/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "twitter_handle": twitter_handle, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "should_send_email": should_send_email, + "email_from": email_from, + "email_cc": email_cc, + "email_bcc": email_bcc, + "email_subject": email_subject, + "email_body": email_body, + "email_body_enable_html": 
email_body_enable_html, + "fallback_email_body": fallback_email_body, + "seed": seed, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(EmailFaceInpaintingPageResponse, parse_obj_as(type_=EmailFaceInpaintingPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def image_from_web_search( + self, + *, + search_query: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: 
typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> GoogleImageGenPageResponse: + """ + Parameters + ---------- + search_query : str + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + prompt_strength : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GoogleImageGenPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.image_from_web_search( + search_query="search_query", + text_prompt="text_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/GoogleImageGen/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "search_query": search_query, + "text_prompt": text_prompt, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(GoogleImageGenPageResponse, parse_obj_as(type_=GoogleImageGenPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_background( + self, + *, + 
input_image: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, + mask_threshold: typing.Optional[float] = OMIT, + rect_persepective_transform: typing.Optional[bool] = OMIT, + reflection_opacity: typing.Optional[float] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> ImageSegmentationPageResponse: + """ + Parameters + ---------- + input_image : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] + + mask_threshold : typing.Optional[float] + + rect_persepective_transform : typing.Optional[bool] + + reflection_opacity : typing.Optional[float] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ImageSegmentationPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.remove_background( + input_image="input_image", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/ImageSegmentation/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "selected_model": selected_model, + "mask_threshold": mask_threshold, + "rect_persepective_transform": rect_persepective_transform, + "reflection_opacity": reflection_opacity, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(ImageSegmentationPageResponse, parse_obj_as(type_=ImageSegmentationPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def upscale( + self, + *, + scale: int, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + input_image: 
typing.Optional[str] = OMIT, + input_video: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, + selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> CompareUpscalerPageResponse: + """ + Parameters + ---------- + scale : int + The final upsampling scale of the image + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_image : typing.Optional[str] + Input Image + + input_video : typing.Optional[str] + Input Video + + selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] + + selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CompareUpscalerPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.upscale( + scale=1, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/compare-ai-upscalers/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "input_video": input_video, + "scale": scale, + "selected_models": selected_models, + "selected_bg_model": selected_bg_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(CompareUpscalerPageResponse, parse_obj_as(type_=CompareUpscalerPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def embed( + self, + *, + texts: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: 
typing.Optional[RequestOptions] = None + ) -> EmbeddingsPageResponse: + """ + Parameters + ---------- + texts : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EmbeddingsPageResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.embed( + texts=["texts"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/embeddings/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(EmbeddingsPageResponse, parse_obj_as(type_=EmbeddingsPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise 
    def seo_people_also_ask_doc(
        self,
        *,
        search_query: str,
        example_id: typing.Optional[str] = None,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT,
        documents: typing.Optional[typing.Sequence[str]] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        doc_extract_url: typing.Optional[str] = OMIT,
        embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT,
        citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> RelatedQnADocPageResponse:
        """
        Start an async run of the `v3/related-qna-maker-doc` recipe and return the parsed response.

        Parameters
        ----------
        search_query : str

        example_id : typing.Optional[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery]

        documents : typing.Optional[typing.Sequence[str]]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        doc_extract_url : typing.Optional[str]

        embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]

            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.


        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel]

        citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType]

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        RelatedQnADocPageResponse
            Successful Response

        Examples
        --------
        from gooey import Gooey

        client = Gooey(
            api_key="YOUR_API_KEY",
        )
        client.seo_people_also_ask_doc(
            search_query="search_query",
        )
        """
        # POST the recipe inputs; any field left as the OMIT sentinel is stripped
        # from the JSON body by the client wrapper (omit=OMIT below).
        _response = self._client_wrapper.httpx_client.request(
            "v3/related-qna-maker-doc/async",
            method="POST",
            params={"example_id": example_id},
            json={
                "functions": functions,
                "variables": variables,
                "search_query": search_query,
                "keyword_query": keyword_query,
                "documents": documents,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "doc_extract_url": doc_extract_url,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "citation_style": citation_style,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "response_format_type": response_format_type,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return typing.cast(RelatedQnADocPageResponse, parse_obj_as(type_=RelatedQnADocPageResponse, object_=_response.json()))  # type: ignore
            # Map documented error statuses onto typed SDK exceptions.
            if _response.status_code == 402:  # HTTP 402 Payment Required
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 422:  # HTTP 422 Unprocessable Entity (request validation failed)
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 429:  # HTTP 429 Too Many Requests
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json()))  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body: surface the raw response text instead of a parse failure.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def health_status_get(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Any + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.health_status_get() + """ + _response = self._client_wrapper.httpx_client.request("status", method="GET", request_options=request_options) + try: + if 200 <= _response.status_code < 300: + return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncGooey: + """ + Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. + + Parameters + ---------- + base_url : typing.Optional[str] + The base url to use for requests from the client. + + environment : GooeyEnvironment + The environment to use for requests from the client. from .environment import GooeyEnvironment + + + + Defaults to GooeyEnvironment.DEFAULT + + + + api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]] + timeout : typing.Optional[float] + The timeout to be used, in seconds, for requests. 
By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. + + follow_redirects : typing.Optional[bool] + Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. + + httpx_client : typing.Optional[httpx.AsyncClient] + The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. + + Examples + -------- + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + """ + + def __init__( + self, + *, + base_url: typing.Optional[str] = None, + environment: GooeyEnvironment = GooeyEnvironment.DEFAULT, + api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"), + timeout: typing.Optional[float] = None, + follow_redirects: typing.Optional[bool] = True, + httpx_client: typing.Optional[httpx.AsyncClient] = None + ): + _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None + if api_key is None: + raise ApiError(body="The client must be instantiated be either passing in api_key or setting GOOEY_API_KEY") + self._client_wrapper = AsyncClientWrapper( + base_url=_get_base_url(base_url=base_url, environment=environment), + api_key=api_key, + httpx_client=httpx_client + if httpx_client is not None + else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) + if follow_redirects is not None + else httpx.AsyncClient(timeout=_defaulted_timeout), + timeout=_defaulted_timeout, + ) + self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper) + self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) + self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper) + self.smart_gpt = 
    async def animate(
        self,
        *,
        animation_prompts: typing.Sequence[AnimationPrompt],
        example_id: typing.Optional[str] = None,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        max_frames: typing.Optional[int] = OMIT,
        selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT,
        animation_mode: typing.Optional[str] = OMIT,
        zoom: typing.Optional[str] = OMIT,
        translation_x: typing.Optional[str] = OMIT,
        translation_y: typing.Optional[str] = OMIT,
        rotation3d_x: typing.Optional[str] = OMIT,
        rotation3d_y: typing.Optional[str] = OMIT,
        rotation3d_z: typing.Optional[str] = OMIT,
        fps: typing.Optional[int] = OMIT,
        seed: typing.Optional[int] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> DeforumSdPageResponse:
        """
        Start an async run of the `v3/DeforumSD` animation recipe and return the parsed response.

        Parameters
        ----------
        animation_prompts : typing.Sequence[AnimationPrompt]

        example_id : typing.Optional[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        max_frames : typing.Optional[int]

        selected_model : typing.Optional[DeforumSdPageRequestSelectedModel]

        animation_mode : typing.Optional[str]

        zoom : typing.Optional[str]

        translation_x : typing.Optional[str]

        translation_y : typing.Optional[str]

        rotation3d_x : typing.Optional[str]

        rotation3d_y : typing.Optional[str]

        rotation3d_z : typing.Optional[str]

        fps : typing.Optional[int]

        seed : typing.Optional[int]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        DeforumSdPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AnimationPrompt, AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.animate(
                animation_prompts=[
                    AnimationPrompt(
                        frame="frame",
                        prompt="prompt",
                    )
                ],
            )


        asyncio.run(main())
        """
        # POST the recipe inputs; OMIT-valued fields are stripped from the JSON body.
        _response = await self._client_wrapper.httpx_client.request(
            "v3/DeforumSD/async",
            method="POST",
            params={"example_id": example_id},
            json={
                "functions": functions,
                "variables": variables,
                "animation_prompts": animation_prompts,
                "max_frames": max_frames,
                "selected_model": selected_model,
                "animation_mode": animation_mode,
                "zoom": zoom,
                "translation_x": translation_x,
                "translation_y": translation_y,
                # NOTE: the python arguments rotation3d_* map to the wire keys
                # rotation_3d_* (extra underscore) expected by the API.
                "rotation_3d_x": rotation3d_x,
                "rotation_3d_y": rotation3d_y,
                "rotation_3d_z": rotation3d_z,
                "fps": fps,
                "seed": seed,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return typing.cast(DeforumSdPageResponse, parse_obj_as(type_=DeforumSdPageResponse, object_=_response.json()))  # type: ignore
            # Map documented error statuses onto typed SDK exceptions.
            if _response.status_code == 402:  # HTTP 402 Payment Required
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 422:  # HTTP 422 Unprocessable Entity (request validation failed)
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 429:  # HTTP 429 Too Many Requests
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json()))  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body: surface the raw response text instead of a parse failure.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
    async def qr_code(
        self,
        *,
        text_prompt: str,
        example_id: typing.Optional[str] = None,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        qr_code_data: typing.Optional[str] = OMIT,
        qr_code_input_image: typing.Optional[str] = OMIT,
        qr_code_vcard: typing.Optional[Vcard] = OMIT,
        qr_code_file: typing.Optional[str] = OMIT,
        use_url_shortener: typing.Optional[bool] = OMIT,
        negative_prompt: typing.Optional[str] = OMIT,
        image_prompt: typing.Optional[str] = OMIT,
        image_prompt_controlnet_models: typing.Optional[
            typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
        ] = OMIT,
        image_prompt_strength: typing.Optional[float] = OMIT,
        image_prompt_scale: typing.Optional[float] = OMIT,
        image_prompt_pos_x: typing.Optional[float] = OMIT,
        image_prompt_pos_y: typing.Optional[float] = OMIT,
        selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
        selected_controlnet_model: typing.Optional[
            typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
        ] = OMIT,
        output_width: typing.Optional[int] = OMIT,
        output_height: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[int] = OMIT,
        scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT,
        seed: typing.Optional[int] = OMIT,
        obj_scale: typing.Optional[float] = OMIT,
        obj_pos_x: typing.Optional[float] = OMIT,
        obj_pos_y: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> QrCodeGeneratorPageResponse:
        """
        Start an async run of the `v3/art-qr-code` recipe and return the parsed response.

        Parameters
        ----------
        text_prompt : str

        example_id : typing.Optional[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        qr_code_data : typing.Optional[str]

        qr_code_input_image : typing.Optional[str]

        qr_code_vcard : typing.Optional[Vcard]

        qr_code_file : typing.Optional[str]

        use_url_shortener : typing.Optional[bool]

        negative_prompt : typing.Optional[str]

        image_prompt : typing.Optional[str]

        image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]

        image_prompt_strength : typing.Optional[float]

        image_prompt_scale : typing.Optional[float]

        image_prompt_pos_x : typing.Optional[float]

        image_prompt_pos_y : typing.Optional[float]

        selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]

        selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]

        output_width : typing.Optional[int]

        output_height : typing.Optional[int]

        guidance_scale : typing.Optional[float]

        controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[int]

        scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler]

        seed : typing.Optional[int]

        obj_scale : typing.Optional[float]

        obj_pos_x : typing.Optional[float]

        obj_pos_y : typing.Optional[float]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        QrCodeGeneratorPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.qr_code(
                text_prompt="text_prompt",
            )


        asyncio.run(main())
        """
        # POST the recipe inputs; OMIT-valued fields are stripped from the JSON body.
        _response = await self._client_wrapper.httpx_client.request(
            "v3/art-qr-code/async",
            method="POST",
            params={"example_id": example_id},
            json={
                "functions": functions,
                "variables": variables,
                "qr_code_data": qr_code_data,
                "qr_code_input_image": qr_code_input_image,
                "qr_code_vcard": qr_code_vcard,
                "qr_code_file": qr_code_file,
                "use_url_shortener": use_url_shortener,
                "text_prompt": text_prompt,
                "negative_prompt": negative_prompt,
                "image_prompt": image_prompt,
                "image_prompt_controlnet_models": image_prompt_controlnet_models,
                "image_prompt_strength": image_prompt_strength,
                "image_prompt_scale": image_prompt_scale,
                "image_prompt_pos_x": image_prompt_pos_x,
                "image_prompt_pos_y": image_prompt_pos_y,
                "selected_model": selected_model,
                "selected_controlnet_model": selected_controlnet_model,
                "output_width": output_width,
                "output_height": output_height,
                "guidance_scale": guidance_scale,
                "controlnet_conditioning_scale": controlnet_conditioning_scale,
                "num_outputs": num_outputs,
                "quality": quality,
                "scheduler": scheduler,
                "seed": seed,
                "obj_scale": obj_scale,
                "obj_pos_x": obj_pos_x,
                "obj_pos_y": obj_pos_y,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return typing.cast(QrCodeGeneratorPageResponse, parse_obj_as(type_=QrCodeGeneratorPageResponse, object_=_response.json()))  # type: ignore
            # Map documented error statuses onto typed SDK exceptions.
            if _response.status_code == 402:  # HTTP 402 Payment Required
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 422:  # HTTP 422 Unprocessable Entity (request validation failed)
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 429:  # HTTP 429 Too Many Requests
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json()))  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body: surface the raw response text instead of a parse failure.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
    async def seo_people_also_ask(
        self,
        *,
        search_query: str,
        site_filter: str,
        example_id: typing.Optional[str] = None,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> RelatedQnAPageResponse:
        """
        Start an async run of the `v3/related-qna-maker` recipe and return the parsed response.

        Parameters
        ----------
        search_query : str

        site_filter : str

        example_id : typing.Optional[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel]

        max_search_urls : typing.Optional[int]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]

            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.


        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType]

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        RelatedQnAPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.seo_people_also_ask(
                search_query="search_query",
                site_filter="site_filter",
            )


        asyncio.run(main())
        """
        # POST the recipe inputs; OMIT-valued fields are stripped from the JSON body.
        _response = await self._client_wrapper.httpx_client.request(
            "v3/related-qna-maker/async",
            method="POST",
            params={"example_id": example_id},
            json={
                "functions": functions,
                "variables": variables,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "response_format_type": response_format_type,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return typing.cast(RelatedQnAPageResponse, parse_obj_as(type_=RelatedQnAPageResponse, object_=_response.json()))  # type: ignore
            # Map documented error statuses onto typed SDK exceptions.
            if _response.status_code == 402:  # HTTP 402 Payment Required
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 422:  # HTTP 422 Unprocessable Entity (request validation failed)
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 429:  # HTTP 429 Too Many Requests
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json()))  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body: surface the raw response text instead of a parse failure.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
    async def seo_content(
        self,
        *,
        search_query: str,
        keywords: str,
        title: str,
        company_url: str,
        example_id: typing.Optional[str] = None,
        task_instructions: typing.Optional[str] = OMIT,
        enable_html: typing.Optional[bool] = OMIT,
        selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        enable_crosslinks: typing.Optional[bool] = OMIT,
        seed: typing.Optional[int] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> SeoSummaryPageResponse:
        """
        Start an async run of the `v3/SEOSummary` recipe and return the parsed response.

        Parameters
        ----------
        search_query : str

        keywords : str

        title : str

        company_url : str

        example_id : typing.Optional[str]

        task_instructions : typing.Optional[str]

        enable_html : typing.Optional[bool]

        selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel]

        max_search_urls : typing.Optional[int]

        enable_crosslinks : typing.Optional[bool]

        seed : typing.Optional[int]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType]

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SeoSummaryPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.seo_content(
                search_query="search_query",
                keywords="keywords",
                title="title",
                company_url="company_url",
            )


        asyncio.run(main())
        """
        # POST the recipe inputs; OMIT-valued fields are stripped from the JSON body.
        # NOTE: unlike most recipe endpoints here, this one takes no functions/variables.
        _response = await self._client_wrapper.httpx_client.request(
            "v3/SEOSummary/async",
            method="POST",
            params={"example_id": example_id},
            json={
                "search_query": search_query,
                "keywords": keywords,
                "title": title,
                "company_url": company_url,
                "task_instructions": task_instructions,
                "enable_html": enable_html,
                "selected_model": selected_model,
                "max_search_urls": max_search_urls,
                "enable_crosslinks": enable_crosslinks,
                "seed": seed,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "response_format_type": response_format_type,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return typing.cast(SeoSummaryPageResponse, parse_obj_as(type_=SeoSummaryPageResponse, object_=_response.json()))  # type: ignore
            # Map documented error statuses onto typed SDK exceptions.
            if _response.status_code == 402:  # HTTP 402 Payment Required
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 422:  # HTTP 422 Unprocessable Entity (request validation failed)
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 429:  # HTTP 429 Too Many Requests
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json()))  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body: surface the raw response text instead of a parse failure.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
    async def web_search_llm(
        self,
        *,
        search_query: str,
        site_filter: str,
        example_id: typing.Optional[str] = None,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> GoogleGptPageResponse:
        """
        Start an async run of the `v3/google-gpt` recipe and return the parsed response.

        Parameters
        ----------
        search_query : str

        site_filter : str

        example_id : typing.Optional[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[GoogleGptPageRequestSelectedModel]

        max_search_urls : typing.Optional[int]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]

            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.


        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType]

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GoogleGptPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.web_search_llm(
                search_query="search_query",
                site_filter="site_filter",
            )


        asyncio.run(main())
        """
        # POST the recipe inputs; OMIT-valued fields are stripped from the JSON body.
        _response = await self._client_wrapper.httpx_client.request(
            "v3/google-gpt/async",
            method="POST",
            params={"example_id": example_id},
            json={
                "functions": functions,
                "variables": variables,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "response_format_type": response_format_type,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json()))  # type: ignore
            # Map documented error statuses onto typed SDK exceptions.
            if _response.status_code == 402:  # HTTP 402 Payment Required
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 422:  # HTTP 422 Unprocessable Entity (request validation failed)
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json()))  # type: ignore
                )
            if _response.status_code == 429:  # HTTP 429 Too Many Requests
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json()))  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body: surface the raw response text instead of a parse failure.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def personalize_email( + self, + *, + email_address: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> SocialLookupEmailPageResponse: + """ + Parameters + 
---------- + email_address : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SocialLookupEmailPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.personalize_email( + email_address="email_address", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/SocialLookupEmail/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "input_prompt": input_prompt, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(SocialLookupEmailPageResponse, parse_obj_as(type_=SocialLookupEmailPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 
402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def bulk_run( + self, + *, + documents: typing.Sequence[str], + run_urls: typing.Sequence[str], + input_columns: typing.Dict[str, str], + output_columns: typing.Dict[str, str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> BulkRunnerPageResponse: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. + Remember to includes header names in your CSV too. + + + run_urls : typing.Sequence[str] + + Provide one or more Gooey.AI workflow runs. + You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. + + + input_columns : typing.Dict[str, str] + + For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. 
+ + + output_columns : typing.Dict[str, str] + + For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. + + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + eval_urls : typing.Optional[typing.Sequence[str]] + + _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + BulkRunnerPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.bulk_run( + documents=["documents"], + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/bulk-runner/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "run_urls": run_urls, + "input_columns": input_columns, + "output_columns": output_columns, + "eval_urls": eval_urls, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(BulkRunnerPageResponse, parse_obj_as(type_=BulkRunnerPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, 
parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def synthesize_data( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + sheet_url: typing.Optional[str] = OMIT, + selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> DocExtractPageResponse: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + sheet_url : typing.Optional[str] + + selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] + + 
google_translate_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + + task_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocExtractPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DocExtractPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.synthesize_data( + documents=["documents"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/doc-extract/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "sheet_url": sheet_url, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "glossary_document": glossary_document, + "task_instructions": task_instructions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + 
request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(DocExtractPageResponse, parse_obj_as(type_=DocExtractPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def llm( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> CompareLlmPageResponse: + """ + Parameters + ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + 
Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CompareLlmPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.llm() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/CompareLLM/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "selected_models": selected_models, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(CompareLlmPageResponse, parse_obj_as(type_=CompareLlmPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def rag( + self, + *, + search_query: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> DocSearchPageResponse: + """ + Parameters + ---------- + search_query : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + 
variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] + + documents : typing.Optional[typing.Sequence[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSearchPageRequestSelectedModel] + + citation_style : typing.Optional[DocSearchPageRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + DocSearchPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.rag( + search_query="search_query", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/doc-search/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(DocSearchPageResponse, parse_obj_as(type_=DocSearchPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore 
+ ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def doc_summary( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + merge_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, + selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> DocSummaryPageResponse: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + merge_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] + + chain_type : typing.Optional[typing.Literal["map_reduce"]] + + selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + 
avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DocSummaryPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.doc_summary( + documents=["documents"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/doc-summary/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "task_instructions": task_instructions, + "merge_instructions": merge_instructions, + "selected_model": selected_model, + "chain_type": chain_type, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(DocSummaryPageResponse, parse_obj_as(type_=DocSummaryPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, 
parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def lipsync_tts( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, + input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, 
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> LipsyncTtsPageResponse: + """ + Parameters + ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] + + input_face : typing.Optional[str] + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[SadTalkerSettings] + + selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] + + settings : typing.Optional[RunSettings] + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + LipsyncTtsPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.lipsync_tts( + text_prompt="text_prompt", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/LipsyncTTS/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "input_face": input_face, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(LipsyncTtsPageResponse, parse_obj_as(type_=LipsyncTtsPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 
402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def text_to_speech( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = 
OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> TextToSpeechPageResponse: + """ + Parameters + ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TextToSpeechPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.text_to_speech( + text_prompt="text_prompt", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/TextToSpeech/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(TextToSpeechPageResponse, parse_obj_as(type_=TextToSpeechPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if 
_response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def speech_recognition( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, + language: typing.Optional[str] = OMIT, + translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, + output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + translation_source: typing.Optional[str] = OMIT, + translation_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> AsrPageResponse: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[AsrPageRequestSelectedModel] + + language : typing.Optional[str] + + translation_model : typing.Optional[AsrPageRequestTranslationModel] + + output_format : typing.Optional[AsrPageRequestOutputFormat] + + google_translate_target : typing.Optional[str] + use `translation_model` & `translation_target` instead. 
+ + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsrPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.speech_recognition( + documents=["documents"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/asr/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "selected_model": selected_model, + "language": language, + "translation_model": translation_model, + "output_format": output_format, + "google_translate_target": google_translate_target, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(AsrPageResponse, parse_obj_as(type_=AsrPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def text_to_music( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + duration_sec: typing.Optional[float] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> Text2AudioPageResponse: + """ + Parameters + ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + duration_sec : typing.Optional[float] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific 
configuration. + + Returns + ------- + Text2AudioPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.text_to_music( + text_prompt="text_prompt", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/text2audio/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "duration_sec": duration_sec, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(Text2AudioPageResponse, parse_obj_as(type_=Text2AudioPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def translate( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: 
typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + texts: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, + translation_source: typing.Optional[str] = OMIT, + translation_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> TranslationPageResponse: + """ + Parameters + ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + texts : typing.Optional[typing.Sequence[str]] + + selected_model : typing.Optional[TranslationPageRequestSelectedModel] + + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TranslationPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.translate() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/translate/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(TranslationPageResponse, parse_obj_as(type_=TranslationPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remix_image( + self, + *, + input_image: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + text_prompt: typing.Optional[str] = OMIT, + selected_model: 
typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, + selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> Img2ImgPageResponse: + """ + Parameters + ---------- + input_image : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + text_prompt : typing.Optional[str] + + selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] + + selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + prompt_strength : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Img2ImgPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.remix_image( + input_image="input_image", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/Img2Img/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "text_prompt": text_prompt, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "seed": seed, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(Img2ImgPageResponse, parse_obj_as(type_=Img2ImgPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise 
ApiError(status_code=_response.status_code, body=_response_json) + + async def text_to_image( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + dall_e3quality: typing.Optional[str] = OMIT, + dall_e3style: typing.Optional[str] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, + edit_instruction: typing.Optional[str] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> CompareText2ImgPageResponse: + """ + Parameters + ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + dall_e3quality : typing.Optional[str] + + dall_e3style : typing.Optional[str] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] + + scheduler : 
typing.Optional[CompareText2ImgPageRequestScheduler] + + edit_instruction : typing.Optional[str] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CompareText2ImgPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.text_to_image( + text_prompt="text_prompt", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/CompareText2Img/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "output_width": output_width, + "output_height": output_height, + "num_outputs": num_outputs, + "quality": quality, + "dall_e_3_quality": dall_e3quality, + "dall_e_3_style": dall_e3style, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "scheduler": scheduler, + "edit_instruction": edit_instruction, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(CompareText2ImgPageResponse, parse_obj_as(type_=CompareText2ImgPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise 
TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def product_image( + self, + *, + input_image: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + mask_threshold: typing.Optional[float] = OMIT, + selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> ObjectInpaintingPageResponse: + """ + Parameters + ---------- + input_image : str + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + mask_threshold : typing.Optional[float] + + selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : 
typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ObjectInpaintingPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.product_image( + input_image="input_image", + text_prompt="text_prompt", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/ObjectInpainting/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "text_prompt": text_prompt, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "mask_threshold": mask_threshold, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(ObjectInpaintingPageResponse, parse_obj_as(type_=ObjectInpaintingPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def portrait( + self, + *, + input_image: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> FaceInpaintingPageResponse: + """ + Parameters + ---------- + input_image : str + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + face_scale : typing.Optional[float] + + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : 
typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + FaceInpaintingPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.portrait( + input_image="input_image", + text_prompt="tony stark from the iron man", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/FaceInpainting/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "seed": seed, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(FaceInpaintingPageResponse, parse_obj_as(type_=FaceInpaintingPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # 
type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def image_from_email( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + email_address: typing.Optional[str] = OMIT, + twitter_handle: typing.Optional[str] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + should_send_email: typing.Optional[bool] = OMIT, + email_from: typing.Optional[str] = OMIT, + email_cc: typing.Optional[str] = OMIT, + email_bcc: typing.Optional[str] = OMIT, + email_subject: typing.Optional[str] = OMIT, + email_body: typing.Optional[str] = OMIT, + email_body_enable_html: typing.Optional[bool] = OMIT, + fallback_email_body: typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> EmailFaceInpaintingPageResponse: + """ + Parameters + ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : 
typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + email_address : typing.Optional[str] + + twitter_handle : typing.Optional[str] + + face_scale : typing.Optional[float] + + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + should_send_email : typing.Optional[bool] + + email_from : typing.Optional[str] + + email_cc : typing.Optional[str] + + email_bcc : typing.Optional[str] + + email_subject : typing.Optional[str] + + email_body : typing.Optional[str] + + email_body_enable_html : typing.Optional[bool] + + fallback_email_body : typing.Optional[str] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EmailFaceInpaintingPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.image_from_email( + email_address="sean@dara.network", + text_prompt="winter's day in paris", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/EmailFaceInpainting/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "twitter_handle": twitter_handle, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "should_send_email": should_send_email, + "email_from": email_from, + "email_cc": email_cc, + "email_bcc": email_bcc, + "email_subject": email_subject, + "email_body": email_body, + "email_body_enable_html": email_body_enable_html, + "fallback_email_body": fallback_email_body, + "seed": seed, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(EmailFaceInpaintingPageResponse, parse_obj_as(type_=EmailFaceInpaintingPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if 
_response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def image_from_web_search( + self, + *, + search_query: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> GoogleImageGenPageResponse: + """ + Parameters + ---------- + search_query : str + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] + + negative_prompt : 
typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + prompt_strength : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GoogleImageGenPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.image_from_web_search( + search_query="search_query", + text_prompt="text_prompt", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/GoogleImageGen/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "search_query": search_query, + "text_prompt": text_prompt, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(GoogleImageGenPageResponse, parse_obj_as(type_=GoogleImageGenPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + 
typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_background( + self, + *, + input_image: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, + mask_threshold: typing.Optional[float] = OMIT, + rect_persepective_transform: typing.Optional[bool] = OMIT, + reflection_opacity: typing.Optional[float] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> ImageSegmentationPageResponse: + """ + Parameters + ---------- + input_image : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] + + mask_threshold : typing.Optional[float] + + rect_persepective_transform : typing.Optional[bool] + + reflection_opacity : typing.Optional[float] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ImageSegmentationPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.remove_background( + input_image="input_image", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/ImageSegmentation/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "selected_model": selected_model, + "mask_threshold": mask_threshold, + "rect_persepective_transform": rect_persepective_transform, + "reflection_opacity": reflection_opacity, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(ImageSegmentationPageResponse, parse_obj_as(type_=ImageSegmentationPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def upscale( + self, + *, + scale: int, + example_id: 
typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + input_image: typing.Optional[str] = OMIT, + input_video: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, + selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> CompareUpscalerPageResponse: + """ + Parameters + ---------- + scale : int + The final upsampling scale of the image + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_image : typing.Optional[str] + Input Image + + input_video : typing.Optional[str] + Input Video + + selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] + + selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CompareUpscalerPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.upscale( + scale=1, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/compare-ai-upscalers/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "input_video": input_video, + "scale": scale, + "selected_models": selected_models, + "selected_bg_model": selected_bg_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(CompareUpscalerPageResponse, parse_obj_as(type_=CompareUpscalerPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def embed( + self, + *, + texts: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + selected_model: 
typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> EmbeddingsPageResponse: + """ + Parameters + ---------- + texts : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EmbeddingsPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.embed( + texts=["texts"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/embeddings/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(EmbeddingsPageResponse, parse_obj_as(type_=EmbeddingsPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def seo_people_also_ask_doc( + self, + *, + search_query: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None + ) -> RelatedQnADocPageResponse: + """ + 
Parameters + ---------- + search_query : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Any]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] + + documents : typing.Optional[typing.Sequence[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] + + citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + RelatedQnADocPageResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.seo_people_also_ask_doc( + search_query="search_query", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/related-qna-maker-doc/async", + method="POST", + params={"example_id": example_id}, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(RelatedQnADocPageResponse, parse_obj_as(type_=RelatedQnADocPageResponse, object_=_response.json())) # type: ignore + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def health_status_get(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Any + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.health_status_get() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "status", method="GET", request_options=request_options + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) def _get_base_url(*, base_url: typing.Optional[str] = None, environment: GooeyEnvironment) -> str: diff --git a/src/gooey/compare_ai_image_generators/__init__.py b/src/gooey/compare_ai_image_generators/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/compare_ai_image_generators/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/compare_ai_image_generators/client.py b/src/gooey/compare_ai_image_generators/client.py deleted file mode 100644 index 29efb05..0000000 --- a/src/gooey/compare_ai_image_generators/client.py +++ /dev/null @@ -1,686 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler -from ..types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem -from ..types.compare_text2img_page_response import CompareText2ImgPageResponse -from ..types.compare_text2img_page_status_response import CompareText2ImgPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class CompareAiImageGeneratorsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def compare_text2img( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - dall_e3quality: typing.Optional[str] = OMIT, - dall_e3style: typing.Optional[str] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, - edit_instruction: typing.Optional[str] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareText2ImgPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - dall_e3quality : typing.Optional[str] - - dall_e3style : typing.Optional[str] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - - scheduler : 
typing.Optional[CompareText2ImgPageRequestScheduler] - - edit_instruction : typing.Optional[str] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CompareText2ImgPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_image_generators.compare_text2img( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/CompareText2Img/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, - "num_outputs": num_outputs, - "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareText2ImgPageResponse, parse_obj_as(type_=CompareText2ImgPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_compare_text2img( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - dall_e3quality: typing.Optional[str] = OMIT, - dall_e3style: typing.Optional[str] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, - edit_instruction: typing.Optional[str] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : 
typing.Optional[int] - - dall_e3quality : typing.Optional[str] - - dall_e3style : typing.Optional[str] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - - scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] - - edit_instruction : typing.Optional[str] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_image_generators.async_compare_text2img( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, - "num_outputs": num_outputs, - "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # 
type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_compare_text2img( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareText2ImgPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CompareText2ImgPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_image_generators.status_compare_text2img( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareText2ImgPageStatusResponse, parse_obj_as(type_=CompareText2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - 
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCompareAiImageGeneratorsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def compare_text2img( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - dall_e3quality: typing.Optional[str] = OMIT, - dall_e3style: typing.Optional[str] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, - edit_instruction: typing.Optional[str] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareText2ImgPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : 
typing.Optional[int] - - dall_e3quality : typing.Optional[str] - - dall_e3style : typing.Optional[str] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - - scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] - - edit_instruction : typing.Optional[str] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CompareText2ImgPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_image_generators.compare_text2img( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/CompareText2Img/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, - "num_outputs": num_outputs, - "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareText2ImgPageResponse, parse_obj_as(type_=CompareText2ImgPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise 
PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_compare_text2img( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - dall_e3quality: typing.Optional[str] = OMIT, - dall_e3style: typing.Optional[str] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, - edit_instruction: typing.Optional[str] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - 
---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - dall_e3quality : typing.Optional[str] - - dall_e3style : typing.Optional[str] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - - scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] - - edit_instruction : typing.Optional[str] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_image_generators.async_compare_text2img( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, - "num_outputs": num_outputs, - "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, 
body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_compare_text2img( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareText2ImgPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CompareText2ImgPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_image_generators.status_compare_text2img( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareText2ImgPageStatusResponse, parse_obj_as(type_=CompareText2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/compare_ai_image_upscalers/__init__.py 
b/src/gooey/compare_ai_image_upscalers/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/compare_ai_image_upscalers/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/compare_ai_image_upscalers/client.py b/src/gooey/compare_ai_image_upscalers/client.py deleted file mode 100644 index b6b46ba..0000000 --- a/src/gooey/compare_ai_image_upscalers/client.py +++ /dev/null @@ -1,537 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem -from ..types.compare_upscaler_page_response import CompareUpscalerPageResponse -from ..types.compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class CompareAiImageUpscalersClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def compare_ai_upscalers( - self, - *, - scale: int, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_image: typing.Optional[str] = OMIT, - input_video: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareUpscalerPageResponse: - """ - Parameters - ---------- - scale : int - The final upsampling scale of the image - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_image : typing.Optional[str] - Input Image - - input_video : typing.Optional[str] - Input Video - - selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] - - selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareUpscalerPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_image_upscalers.compare_ai_upscalers( - scale=1, - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/compare-ai-upscalers/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "input_video": input_video, - "scale": scale, - "selected_models": selected_models, - "selected_bg_model": selected_bg_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareUpscalerPageResponse, parse_obj_as(type_=CompareUpscalerPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_compare_ai_upscalers( - self, - *, - scale: int, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: 
typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_image: typing.Optional[str] = OMIT, - input_video: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - scale : int - The final upsampling scale of the image - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_image : typing.Optional[str] - Input Image - - input_video : typing.Optional[str] - Input Video - - selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] - - selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_image_upscalers.async_compare_ai_upscalers( - scale=1, - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "input_video": input_video, - "scale": scale, - "selected_models": selected_models, - "selected_bg_model": selected_bg_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_compare_ai_upscalers( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareUpscalerPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareUpscalerPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_image_upscalers.status_compare_ai_upscalers( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareUpscalerPageStatusResponse, parse_obj_as(type_=CompareUpscalerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCompareAiImageUpscalersClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def compare_ai_upscalers( - self, - *, - scale: int, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_image: typing.Optional[str] = OMIT, - input_video: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, - selected_bg_model: 
typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareUpscalerPageResponse: - """ - Parameters - ---------- - scale : int - The final upsampling scale of the image - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_image : typing.Optional[str] - Input Image - - input_video : typing.Optional[str] - Input Video - - selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] - - selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CompareUpscalerPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_image_upscalers.compare_ai_upscalers( - scale=1, - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/compare-ai-upscalers/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "input_video": input_video, - "scale": scale, - "selected_models": selected_models, - "selected_bg_model": selected_bg_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareUpscalerPageResponse, parse_obj_as(type_=CompareUpscalerPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, 
parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_compare_ai_upscalers( - self, - *, - scale: int, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_image: typing.Optional[str] = OMIT, - input_video: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - scale : int - The final upsampling scale of the image - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_image : typing.Optional[str] - Input Image - - input_video : typing.Optional[str] - Input Video - - selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] - - selected_bg_model : 
typing.Optional[typing.Literal["real_esrgan_x2"]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_image_upscalers.async_compare_ai_upscalers( - scale=1, - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "input_video": input_video, - "scale": scale, - "selected_models": selected_models, - "selected_bg_model": selected_bg_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_compare_ai_upscalers( - self, *, run_id: str, 
request_options: typing.Optional[RequestOptions] = None - ) -> CompareUpscalerPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CompareUpscalerPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_image_upscalers.status_compare_ai_upscalers( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareUpscalerPageStatusResponse, parse_obj_as(type_=CompareUpscalerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/compare_ai_translations/__init__.py b/src/gooey/compare_ai_translations/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/compare_ai_translations/__init__.py +++ /dev/null @@ -1,2 
+0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/compare_ai_translations/client.py b/src/gooey/compare_ai_translations/client.py deleted file mode 100644 index 9e58f6c..0000000 --- a/src/gooey/compare_ai_translations/client.py +++ /dev/null @@ -1,525 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from ..types.translation_page_request_selected_model import TranslationPageRequestSelectedModel -from ..types.translation_page_response import TranslationPageResponse -from ..types.translation_page_status_response import TranslationPageStatusResponse - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class CompareAiTranslationsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def translate( - self, - *, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - texts: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> TranslationPageResponse: - """ - Parameters - ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - texts : typing.Optional[typing.Sequence[str]] - - selected_model : typing.Optional[TranslationPageRequestSelectedModel] - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - TranslationPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_translations.translate() - """ - _response = self._client_wrapper.httpx_client.request( - "v2/translate/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TranslationPageResponse, parse_obj_as(type_=TranslationPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_translate( - self, - *, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - texts: 
typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - texts : typing.Optional[typing.Sequence[str]] - - selected_model : typing.Optional[TranslationPageRequestSelectedModel] - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_translations.async_translate() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/translate/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_translate( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> TranslationPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - TranslationPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_translations.status_translate( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/translate/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TranslationPageStatusResponse, parse_obj_as(type_=TranslationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCompareAiTranslationsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def translate( - self, - *, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - texts: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] 
= OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> TranslationPageResponse: - """ - Parameters - ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - texts : typing.Optional[typing.Sequence[str]] - - selected_model : typing.Optional[TranslationPageRequestSelectedModel] - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - TranslationPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_translations.translate() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/translate/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TranslationPageResponse, parse_obj_as(type_=TranslationPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_translate( - self, - *, - functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - texts: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - texts : typing.Optional[typing.Sequence[str]] - - selected_model : typing.Optional[TranslationPageRequestSelectedModel] - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_translations.async_translate() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/translate/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_translate( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> TranslationPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific 
configuration. - - Returns - ------- - TranslationPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_translations.status_translate( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/translate/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TranslationPageStatusResponse, parse_obj_as(type_=TranslationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/compare_ai_voice_generators/__init__.py b/src/gooey/compare_ai_voice_generators/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/compare_ai_voice_generators/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/compare_ai_voice_generators/client.py b/src/gooey/compare_ai_voice_generators/client.py deleted file mode 100644 index ebc631c..0000000 --- a/src/gooey/compare_ai_voice_generators/client.py +++ /dev/null @@ -1,755 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from ..types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel -from ..types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName -from ..types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider -from ..types.text_to_speech_page_response import TextToSpeechPageResponse -from ..types.text_to_speech_page_status_response import TextToSpeechPageStatusResponse - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class CompareAiVoiceGeneratorsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def text_to_speech( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> TextToSpeechPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - 
google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - TextToSpeechPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_voice_generators.text_to_speech( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/TextToSpeech/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": 
elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TextToSpeechPageResponse, parse_obj_as(type_=TextToSpeechPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_text_to_speech( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - 
elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] - - openai_tts_model : 
typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_voice_generators.async_text_to_speech( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - 
typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_text_to_speech( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> TextToSpeechPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - TextToSpeechPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.compare_ai_voice_generators.status_text_to_speech( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TextToSpeechPageStatusResponse, parse_obj_as(type_=TextToSpeechPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: 
ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCompareAiVoiceGeneratorsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def text_to_speech( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> TextToSpeechPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as 
arguments - - tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - TextToSpeechPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_voice_generators.text_to_speech( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/TextToSpeech/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TextToSpeechPageResponse, parse_obj_as(type_=TextToSpeechPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # 
type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_text_to_speech( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - 
Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_voice_generators.async_text_to_speech( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_text_to_speech( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> TextToSpeechPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - TextToSpeechPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_voice_generators.status_text_to_speech( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TextToSpeechPageStatusResponse, parse_obj_as(type_=TextToSpeechPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, 
object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/copilot_for_your_enterprise/__init__.py b/src/gooey/copilot_for_your_enterprise/__init__.py index f3ea265..f1637db 100644 --- a/src/gooey/copilot_for_your_enterprise/__init__.py +++ b/src/gooey/copilot_for_your_enterprise/__init__.py @@ -1,2 +1,27 @@ # This file was auto-generated by Fern from our API Definition. +from .types import ( + VideoBotsPageRequestAsrModel, + VideoBotsPageRequestCitationStyle, + VideoBotsPageRequestEmbeddingModel, + VideoBotsPageRequestLipsyncModel, + VideoBotsPageRequestOpenaiTtsModel, + VideoBotsPageRequestOpenaiVoiceName, + VideoBotsPageRequestResponseFormatType, + VideoBotsPageRequestSelectedModel, + VideoBotsPageRequestTranslationModel, + VideoBotsPageRequestTtsProvider, +) + +__all__ = [ + "VideoBotsPageRequestAsrModel", + "VideoBotsPageRequestCitationStyle", + "VideoBotsPageRequestEmbeddingModel", + "VideoBotsPageRequestLipsyncModel", + "VideoBotsPageRequestOpenaiTtsModel", + "VideoBotsPageRequestOpenaiVoiceName", + "VideoBotsPageRequestResponseFormatType", + "VideoBotsPageRequestSelectedModel", + "VideoBotsPageRequestTranslationModel", + "VideoBotsPageRequestTtsProvider", +] diff --git a/src/gooey/copilot_for_your_enterprise/client.py b/src/gooey/copilot_for_your_enterprise/client.py index e075cbe..13a438a 100644 --- a/src/gooey/copilot_for_your_enterprise/client.py +++ b/src/gooey/copilot_for_your_enterprise/client.py @@ -7,30 +7,27 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError from ..errors.payment_required_error import PaymentRequiredError from 
..errors.too_many_requests_error import TooManyRequestsError from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 from ..types.conversation_entry import ConversationEntry -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 from ..types.generic_error_response import GenericErrorResponse from ..types.http_validation_error import HttpValidationError from ..types.llm_tools import LlmTools from ..types.recipe_function import RecipeFunction from ..types.run_settings import RunSettings from ..types.sad_talker_settings import SadTalkerSettings -from ..types.video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel -from ..types.video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle -from ..types.video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel -from ..types.video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel -from ..types.video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel -from ..types.video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName -from ..types.video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel -from ..types.video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel -from ..types.video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider from ..types.video_bots_page_response import VideoBotsPageResponse -from ..types.video_bots_page_status_response import VideoBotsPageStatusResponse +from .types.video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel +from .types.video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle +from .types.video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel +from .types.video_bots_page_request_lipsync_model import 
VideoBotsPageRequestLipsyncModel +from .types.video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel +from .types.video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName +from .types.video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType +from .types.video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel +from .types.video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel +from .types.video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -40,681 +37,10 @@ class CopilotForYourEnterpriseClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def video_bots( - self, - *, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - input_audio: typing.Optional[str] = OMIT, - input_images: typing.Optional[typing.Sequence[str]] = OMIT, - input_documents: typing.Optional[typing.Sequence[str]] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT, - bot_script: typing.Optional[str] = OMIT, - selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT, - document_model: typing.Optional[str] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - keyword_instructions: typing.Optional[str] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: 
typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT, - asr_language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT, - user_language: typing.Optional[str] = OMIT, - input_glossary_document: typing.Optional[str] = OMIT, - output_glossary_document: typing.Optional[str] = OMIT, - lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT, - tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, - tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - 
face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> VideoBotsPageResponse: - """ - Parameters - ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - input_audio : typing.Optional[str] - - input_images : typing.Optional[typing.Sequence[str]] - - input_documents : typing.Optional[typing.Sequence[str]] - - doc_extract_url : typing.Optional[str] - Select a workflow to extract text from documents and images. - - messages : typing.Optional[typing.Sequence[ConversationEntry]] - - bot_script : typing.Optional[str] - - selected_model : typing.Optional[VideoBotsPageRequestSelectedModel] - - document_model : typing.Optional[str] - When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? 
(via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - keyword_instructions : typing.Optional[str] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - citation_style : typing.Optional[VideoBotsPageRequestCitationStyle] - - use_url_shortener : typing.Optional[bool] - - asr_model : typing.Optional[VideoBotsPageRequestAsrModel] - Choose a model to transcribe incoming audio messages to text. - - asr_language : typing.Optional[str] - Choose a language to transcribe incoming audio messages to text. - - translation_model : typing.Optional[VideoBotsPageRequestTranslationModel] - - user_language : typing.Optional[str] - Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. 
- - input_glossary_document : typing.Optional[str] - Translation Glossary for User Langauge -> LLM Language (English) - - output_glossary_document : typing.Optional[str] - Translation Glossary for LLM Language (English) -> User Langauge - - lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel] - - tools : typing.Optional[typing.Sequence[LlmTools]] - Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - - tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - VideoBotsPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.copilot_for_your_enterprise.video_bots() - """ - _response = self._client_wrapper.httpx_client.request( - "v2/video-bots/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "input_audio": input_audio, - "input_images": input_images, - "input_documents": input_documents, - "doc_extract_url": doc_extract_url, - "messages": messages, - "bot_script": bot_script, - "selected_model": selected_model, - "document_model": document_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "keyword_instructions": keyword_instructions, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "citation_style": citation_style, - "use_url_shortener": use_url_shortener, - "asr_model": asr_model, - "asr_language": asr_language, - "translation_model": translation_model, - "user_language": user_language, - "input_glossary_document": input_glossary_document, - "output_glossary_document": output_glossary_document, - "lipsync_model": lipsync_model, - "tools": tools, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - 
"elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(VideoBotsPageResponse, parse_obj_as(type_=VideoBotsPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_video_bots( - self, - *, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: 
typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - input_audio: typing.Optional[str] = OMIT, - input_images: typing.Optional[typing.Sequence[str]] = OMIT, - input_documents: typing.Optional[typing.Sequence[str]] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT, - bot_script: typing.Optional[str] = OMIT, - selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT, - document_model: typing.Optional[str] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - keyword_instructions: typing.Optional[str] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT, - asr_language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT, - user_language: typing.Optional[str] = OMIT, - input_glossary_document: typing.Optional[str] = OMIT, - output_glossary_document: typing.Optional[str] = OMIT, - lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT, - tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, - tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: 
typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - input_audio : typing.Optional[str] - - input_images : typing.Optional[typing.Sequence[str]] - - input_documents : typing.Optional[typing.Sequence[str]] - - doc_extract_url : typing.Optional[str] - Select a workflow to extract text from documents and images. 
- - messages : typing.Optional[typing.Sequence[ConversationEntry]] - - bot_script : typing.Optional[str] - - selected_model : typing.Optional[VideoBotsPageRequestSelectedModel] - - document_model : typing.Optional[str] - When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - keyword_instructions : typing.Optional[str] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - citation_style : typing.Optional[VideoBotsPageRequestCitationStyle] - - use_url_shortener : typing.Optional[bool] - - asr_model : typing.Optional[VideoBotsPageRequestAsrModel] - Choose a model to transcribe incoming audio messages to text. - - asr_language : typing.Optional[str] - Choose a language to transcribe incoming audio messages to text. - - translation_model : typing.Optional[VideoBotsPageRequestTranslationModel] - - user_language : typing.Optional[str] - Choose a language to translate incoming text & audio messages to English and responses back to your selected language. 
Useful for low-resource languages. - - input_glossary_document : typing.Optional[str] - Translation Glossary for User Langauge -> LLM Language (English) - - output_glossary_document : typing.Optional[str] - Translation Glossary for LLM Language (English) -> User Langauge - - lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel] - - tools : typing.Optional[typing.Sequence[LlmTools]] - Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - - tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.copilot_for_your_enterprise.async_video_bots() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/video-bots/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "input_audio": input_audio, - "input_images": input_images, - "input_documents": input_documents, - "doc_extract_url": doc_extract_url, - "messages": messages, - "bot_script": bot_script, - "selected_model": selected_model, - "document_model": document_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "keyword_instructions": keyword_instructions, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "citation_style": citation_style, - "use_url_shortener": use_url_shortener, - "asr_model": asr_model, - "asr_language": asr_language, - "translation_model": translation_model, - "user_language": user_language, - "input_glossary_document": input_glossary_document, - "output_glossary_document": output_glossary_document, - "lipsync_model": lipsync_model, - "tools": tools, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - 
"elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_video_bots( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> VideoBotsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - VideoBotsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.copilot_for_your_enterprise.status_video_bots( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/video-bots/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(VideoBotsPageStatusResponse, parse_obj_as(type_=VideoBotsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCopilotForYourEnterpriseClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def video_bots( + def async_video_bots( self, *, + example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, input_prompt: typing.Optional[str] = OMIT, @@ -726,11 +52,6 @@ async def video_bots( bot_script: typing.Optional[str] = OMIT, selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT, 
document_model: typing.Optional[str] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, keyword_instructions: typing.Optional[str] = OMIT, @@ -750,6 +71,12 @@ async def video_bots( output_glossary_document: typing.Optional[str] = OMIT, lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT, tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT, tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT, uberduck_voice_name: typing.Optional[str] = OMIT, uberduck_speaking_rate: typing.Optional[float] = OMIT, @@ -780,6 +107,8 @@ async def video_bots( """ Parameters ---------- + example_id : typing.Optional[str] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Any]] @@ -805,16 +134,6 @@ async def video_bots( document_model : typing.Optional[str] When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? 
(via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - task_instructions : typing.Optional[str] query_instructions : typing.Optional[str] @@ -832,9 +151,11 @@ async def video_bots( embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel] dense_weight : typing.Optional[float] + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + citation_style : typing.Optional[VideoBotsPageRequestCitationStyle] use_url_shortener : typing.Optional[bool] @@ -851,16 +172,32 @@ async def video_bots( Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. input_glossary_document : typing.Optional[str] + Translation Glossary for User Langauge -> LLM Language (English) + output_glossary_document : typing.Optional[str] + Translation Glossary for LLM Language (English) -> User Langauge + lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel] tools : typing.Optional[typing.Sequence[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). 
+ avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType] + tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -922,25 +259,17 @@ async def video_bots( Examples -------- - import asyncio - - from gooey import AsyncGooey + from gooey import Gooey - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", + client = Gooey( api_key="YOUR_API_KEY", ) - - - async def main() -> None: - await client.copilot_for_your_enterprise.video_bots() - - - asyncio.run(main()) + client.copilot_for_your_enterprise.async_video_bots() """ - _response = await self._client_wrapper.httpx_client.request( - "v2/video-bots/", + _response = self._client_wrapper.httpx_client.request( + "v3/video-bots/async", method="POST", + params={"example_id": example_id}, json={ "functions": functions, "variables": variables, @@ -953,11 +282,6 @@ async def main() -> None: "bot_script": bot_script, "selected_model": selected_model, "document_model": document_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, "task_instructions": task_instructions, "query_instructions": query_instructions, "keyword_instructions": keyword_instructions, @@ -977,6 +301,12 @@ async def main() -> None: "output_glossary_document": output_glossary_document, "lipsync_model": lipsync_model, "tools": tools, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, "tts_provider": tts_provider, "uberduck_voice_name": uberduck_voice_name, "uberduck_speaking_rate": 
uberduck_speaking_rate, @@ -1021,18 +351,20 @@ async def main() -> None: raise TooManyRequestsError( typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + +class AsyncCopilotForYourEnterpriseClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + async def async_video_bots( self, *, + example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, input_prompt: typing.Optional[str] = OMIT, @@ -1044,11 +376,6 @@ async def async_video_bots( bot_script: typing.Optional[str] = OMIT, selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT, document_model: typing.Optional[str] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, keyword_instructions: typing.Optional[str] = OMIT, @@ -1068,6 +395,12 @@ async def async_video_bots( output_glossary_document: typing.Optional[str] = OMIT, lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT, tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: 
typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT, tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT, uberduck_voice_name: typing.Optional[str] = OMIT, uberduck_speaking_rate: typing.Optional[float] = OMIT, @@ -1094,10 +427,12 @@ async def async_video_bots( sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: + ) -> VideoBotsPageResponse: """ Parameters ---------- + example_id : typing.Optional[str] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Any]] @@ -1123,16 +458,6 @@ async def async_video_bots( document_model : typing.Optional[str] When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - task_instructions : typing.Optional[str] query_instructions : typing.Optional[str] @@ -1150,9 +475,11 @@ async def async_video_bots( embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel] dense_weight : typing.Optional[float] + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ citation_style : typing.Optional[VideoBotsPageRequestCitationStyle] use_url_shortener : typing.Optional[bool] @@ -1169,16 +496,32 @@ async def async_video_bots( Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. input_glossary_document : typing.Optional[str] + Translation Glossary for User Langauge -> LLM Language (English) + output_glossary_document : typing.Optional[str] + Translation Glossary for LLM Language (English) -> User Langauge + lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel] tools : typing.Optional[typing.Sequence[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType] + tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -1235,7 +578,7 @@ async def async_video_bots( Returns ------- - AsyncApiResponseModelV3 + VideoBotsPageResponse Successful Response Examples @@ -1245,7 +588,6 @@ async def async_video_bots( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) @@ -1257,8 +599,9 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/video-bots/async/", + "v3/video-bots/async", method="POST", + params={"example_id": example_id}, json={ "functions": functions, "variables": variables, @@ -1271,11 +614,6 @@ async def main() -> None: "bot_script": bot_script, "selected_model": selected_model, "document_model": document_model, - "avoid_repetition": avoid_repetition, - 
"num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, "task_instructions": task_instructions, "query_instructions": query_instructions, "keyword_instructions": keyword_instructions, @@ -1295,6 +633,12 @@ async def main() -> None: "output_glossary_document": output_glossary_document, "lipsync_model": lipsync_model, "tools": tools, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, "tts_provider": tts_provider, "uberduck_voice_name": uberduck_voice_name, "uberduck_speaking_rate": uberduck_speaking_rate, @@ -1326,66 +670,7 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_video_bots( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> VideoBotsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific 
configuration. - - Returns - ------- - VideoBotsPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.copilot_for_your_enterprise.status_video_bots( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/video-bots/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(VideoBotsPageStatusResponse, parse_obj_as(type_=VideoBotsPageStatusResponse, object_=_response.json())) # type: ignore + return typing.cast(VideoBotsPageResponse, parse_obj_as(type_=VideoBotsPageResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore diff --git a/src/gooey/copilot_for_your_enterprise/types/__init__.py b/src/gooey/copilot_for_your_enterprise/types/__init__.py new file mode 100644 index 0000000..dd7ed8b --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/__init__.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel +from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle +from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel +from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel +from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel +from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName +from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType +from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel +from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel +from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider + +__all__ = [ + "VideoBotsPageRequestAsrModel", + "VideoBotsPageRequestCitationStyle", + "VideoBotsPageRequestEmbeddingModel", + "VideoBotsPageRequestLipsyncModel", + "VideoBotsPageRequestOpenaiTtsModel", + "VideoBotsPageRequestOpenaiVoiceName", + "VideoBotsPageRequestResponseFormatType", + "VideoBotsPageRequestSelectedModel", + "VideoBotsPageRequestTranslationModel", + "VideoBotsPageRequestTtsProvider", +] diff --git a/src/gooey/types/video_bots_page_request_asr_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_asr_model.py similarity index 94% rename from src/gooey/types/video_bots_page_request_asr_model.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_asr_model.py index fa50247..7db13bc 100644 --- a/src/gooey/types/video_bots_page_request_asr_model.py +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_asr_model.py @@ -15,8 +15,9 @@ "usm", "deepgram", "azure", - "seamless_m4t", + "seamless_m4t_v2", "mms_1b_all", + "seamless_m4t", ], typing.Any, ] diff --git 
a/src/gooey/types/video_bots_page_request_citation_style.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_citation_style.py similarity index 100% rename from src/gooey/types/video_bots_page_request_citation_style.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_citation_style.py diff --git a/src/gooey/types/video_bots_page_request_embedding_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_embedding_model.py similarity index 100% rename from src/gooey/types/video_bots_page_request_embedding_model.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_embedding_model.py diff --git a/src/gooey/types/video_bots_page_request_lipsync_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_lipsync_model.py similarity index 100% rename from src/gooey/types/video_bots_page_request_lipsync_model.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_lipsync_model.py diff --git a/src/gooey/types/video_bots_page_request_openai_tts_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_tts_model.py similarity index 100% rename from src/gooey/types/video_bots_page_request_openai_tts_model.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_tts_model.py diff --git a/src/gooey/types/video_bots_page_request_openai_voice_name.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_voice_name.py similarity index 100% rename from src/gooey/types/video_bots_page_request_openai_voice_name.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_voice_name.py diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py new file mode 100644 index 0000000..25cc8f1 --- 
/dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VideoBotsPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/video_bots_page_request_selected_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py similarity index 79% rename from src/gooey/types/video_bots_page_request_selected_model.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py index 8f0e1e1..e327a7d 100644 --- a/src/gooey/types/video_bots_page_request_selected_model.py +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py @@ -5,6 +5,8 @@ VideoBotsPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git a/src/gooey/types/video_bots_page_request_translation_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_translation_model.py similarity index 100% rename from src/gooey/types/video_bots_page_request_translation_model.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_translation_model.py diff --git a/src/gooey/types/video_bots_page_request_tts_provider.py 
b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_tts_provider.py similarity index 100% rename from src/gooey/types/video_bots_page_request_tts_provider.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_tts_provider.py diff --git a/src/gooey/copilot_integrations/__init__.py b/src/gooey/copilot_integrations/__init__.py index 3861c31..8d66257 100644 --- a/src/gooey/copilot_integrations/__init__.py +++ b/src/gooey/copilot_integrations/__init__.py @@ -7,6 +7,7 @@ CreateStreamRequestLipsyncModel, CreateStreamRequestOpenaiTtsModel, CreateStreamRequestOpenaiVoiceName, + CreateStreamRequestResponseFormatType, CreateStreamRequestSelectedModel, CreateStreamRequestTranslationModel, CreateStreamRequestTtsProvider, @@ -20,6 +21,7 @@ "CreateStreamRequestLipsyncModel", "CreateStreamRequestOpenaiTtsModel", "CreateStreamRequestOpenaiVoiceName", + "CreateStreamRequestResponseFormatType", "CreateStreamRequestSelectedModel", "CreateStreamRequestTranslationModel", "CreateStreamRequestTtsProvider", diff --git a/src/gooey/copilot_integrations/client.py b/src/gooey/copilot_integrations/client.py index ca1c582..abc570c 100644 --- a/src/gooey/copilot_integrations/client.py +++ b/src/gooey/copilot_integrations/client.py @@ -23,6 +23,7 @@ from .types.create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel from .types.create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel from .types.create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName +from .types.create_stream_request_response_format_type import CreateStreamRequestResponseFormatType from .types.create_stream_request_selected_model import CreateStreamRequestSelectedModel from .types.create_stream_request_translation_model import CreateStreamRequestTranslationModel from .types.create_stream_request_tts_provider import CreateStreamRequestTtsProvider @@ -55,11 +56,6 @@ def video_bots_stream_create( bot_script: 
typing.Optional[str] = OMIT, selected_model: typing.Optional[CreateStreamRequestSelectedModel] = OMIT, document_model: typing.Optional[str] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, keyword_instructions: typing.Optional[str] = OMIT, @@ -79,6 +75,12 @@ def video_bots_stream_create( output_glossary_document: typing.Optional[str] = OMIT, lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = OMIT, tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = OMIT, tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = OMIT, uberduck_voice_name: typing.Optional[str] = OMIT, uberduck_speaking_rate: typing.Optional[float] = OMIT, @@ -157,16 +159,6 @@ def video_bots_stream_create( document_model : typing.Optional[str] When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? 
(via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - task_instructions : typing.Optional[str] query_instructions : typing.Optional[str] @@ -219,6 +211,18 @@ def video_bots_stream_create( tools : typing.Optional[typing.Sequence[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[CreateStreamRequestResponseFormatType] + tts_provider : typing.Optional[CreateStreamRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -284,7 +288,6 @@ def video_bots_stream_create( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) client.copilot_integrations.video_bots_stream_create( @@ -292,7 +295,7 @@ def video_bots_stream_create( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/integrations/stream/", + "v3/integrations/stream", method="POST", json={ "integration_id": integration_id, @@ -311,11 +314,6 @@ def video_bots_stream_create( "bot_script": bot_script, "selected_model": selected_model, "document_model": document_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, "task_instructions": task_instructions, "query_instructions": query_instructions, "keyword_instructions": 
keyword_instructions, @@ -335,6 +333,12 @@ def video_bots_stream_create( "output_glossary_document": output_glossary_document, "lipsync_model": lipsync_model, "tools": tools, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, "tts_provider": tts_provider, "uberduck_voice_name": uberduck_voice_name, "uberduck_speaking_rate": uberduck_speaking_rate, @@ -401,7 +405,6 @@ def video_bots_stream( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) client.copilot_integrations.video_bots_stream( @@ -409,7 +412,7 @@ def video_bots_stream( ) """ _response = self._client_wrapper.httpx_client.request( - f"v3/integrations/stream/{jsonable_encoder(request_id)}/", method="GET", request_options=request_options + f"v3/integrations/stream/{jsonable_encoder(request_id)}", method="GET", request_options=request_options ) try: if 200 <= _response.status_code < 300: @@ -451,11 +454,6 @@ async def video_bots_stream_create( bot_script: typing.Optional[str] = OMIT, selected_model: typing.Optional[CreateStreamRequestSelectedModel] = OMIT, document_model: typing.Optional[str] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, keyword_instructions: typing.Optional[str] = OMIT, @@ -475,6 +473,12 @@ async def video_bots_stream_create( output_glossary_document: typing.Optional[str] = OMIT, lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = OMIT, tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] 
= OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = OMIT, tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = OMIT, uberduck_voice_name: typing.Optional[str] = OMIT, uberduck_speaking_rate: typing.Optional[float] = OMIT, @@ -553,16 +557,6 @@ async def video_bots_stream_create( document_model : typing.Optional[str] When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - task_instructions : typing.Optional[str] query_instructions : typing.Optional[str] @@ -615,6 +609,18 @@ async def video_bots_stream_create( tools : typing.Optional[typing.Sequence[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). 
+ avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[CreateStreamRequestResponseFormatType] + tts_provider : typing.Optional[CreateStreamRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -682,7 +688,6 @@ async def video_bots_stream_create( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) @@ -696,7 +701,7 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/integrations/stream/", + "v3/integrations/stream", method="POST", json={ "integration_id": integration_id, @@ -715,11 +720,6 @@ async def main() -> None: "bot_script": bot_script, "selected_model": selected_model, "document_model": document_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, "task_instructions": task_instructions, "query_instructions": query_instructions, "keyword_instructions": keyword_instructions, @@ -739,6 +739,12 @@ async def main() -> None: "output_glossary_document": output_glossary_document, "lipsync_model": lipsync_model, "tools": tools, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, "tts_provider": tts_provider, "uberduck_voice_name": uberduck_voice_name, "uberduck_speaking_rate": uberduck_speaking_rate, @@ -807,7 +813,6 @@ async def video_bots_stream( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) @@ -821,7 +826,7 @@ async def main() -> None: asyncio.run(main()) """ _response = await 
self._client_wrapper.httpx_client.request( - f"v3/integrations/stream/{jsonable_encoder(request_id)}/", method="GET", request_options=request_options + f"v3/integrations/stream/{jsonable_encoder(request_id)}", method="GET", request_options=request_options ) try: if 200 <= _response.status_code < 300: diff --git a/src/gooey/copilot_integrations/types/__init__.py b/src/gooey/copilot_integrations/types/__init__.py index 4e7d806..1224051 100644 --- a/src/gooey/copilot_integrations/types/__init__.py +++ b/src/gooey/copilot_integrations/types/__init__.py @@ -6,6 +6,7 @@ from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName +from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType from .create_stream_request_selected_model import CreateStreamRequestSelectedModel from .create_stream_request_translation_model import CreateStreamRequestTranslationModel from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider @@ -18,6 +19,7 @@ "CreateStreamRequestLipsyncModel", "CreateStreamRequestOpenaiTtsModel", "CreateStreamRequestOpenaiVoiceName", + "CreateStreamRequestResponseFormatType", "CreateStreamRequestSelectedModel", "CreateStreamRequestTranslationModel", "CreateStreamRequestTtsProvider", diff --git a/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py b/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py index c6d4550..af166fa 100644 --- a/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py +++ b/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py @@ -15,8 +15,9 @@ "usm", "deepgram", "azure", - "seamless_m4t", + "seamless_m4t_v2", "mms_1b_all", + "seamless_m4t", ], typing.Any, ] diff --git 
a/src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py b/src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py new file mode 100644 index 0000000..dc5024d --- /dev/null +++ b/src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CreateStreamRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py b/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py index 765029f..7227a94 100644 --- a/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py +++ b/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py @@ -5,6 +5,8 @@ CreateStreamRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py index eafa82c..be15e61 100644 --- a/src/gooey/core/client_wrapper.py +++ b/src/gooey/core/client_wrapper.py @@ -11,12 +11,10 @@ class BaseClientWrapper: def __init__( self, *, - authorization: typing.Optional[str] = None, api_key: typing.Union[str, typing.Callable[[], str]], base_url: str, timeout: typing.Optional[float] = None, ): - 
self._authorization = authorization self._api_key = api_key self._base_url = base_url self._timeout = timeout @@ -25,10 +23,8 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "gooeyai", - "X-Fern-SDK-Version": "0.0.1b1", + "X-Fern-SDK-Version": "0.0.1-beta3", } - if self._authorization is not None: - headers["Authorization"] = self._authorization headers["Authorization"] = f"Bearer {self._get_api_key()}" return headers @@ -49,13 +45,12 @@ class SyncClientWrapper(BaseClientWrapper): def __init__( self, *, - authorization: typing.Optional[str] = None, api_key: typing.Union[str, typing.Callable[[], str]], base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.Client, ): - super().__init__(authorization=authorization, api_key=api_key, base_url=base_url, timeout=timeout) + super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) self.httpx_client = HttpClient( httpx_client=httpx_client, base_headers=self.get_headers(), @@ -68,13 +63,12 @@ class AsyncClientWrapper(BaseClientWrapper): def __init__( self, *, - authorization: typing.Optional[str] = None, api_key: typing.Union[str, typing.Callable[[], str]], base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.AsyncClient, ): - super().__init__(authorization=authorization, api_key=api_key, base_url=base_url, timeout=timeout) + super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) self.httpx_client = AsyncHttpClient( httpx_client=httpx_client, base_headers=self.get_headers(), diff --git a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py deleted file mode 100644 index 8170883..0000000 --- a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py +++ /dev/null @@ -1,734 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.run_settings import RunSettings -from ..types.seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel -from ..types.seo_summary_page_response import SeoSummaryPageResponse -from ..types.seo_summary_page_status_response import SeoSummaryPageStatusResponse -from ..types.serp_search_location import SerpSearchLocation -from ..types.serp_search_type import SerpSearchType - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class CreateAPerfectSeoOptimizedTitleParagraphClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def seo_summary( - self, - *, - search_query: str, - keywords: str, - title: str, - company_url: str, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - enable_html: typing.Optional[bool] = OMIT, - selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - enable_crosslinks: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> SeoSummaryPageResponse: - """ - Parameters - ---------- - search_query : str - - keywords : str - - title : str - - company_url : str - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : typing.Optional[str] - - enable_html : typing.Optional[bool] - - selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] - - sampling_temperature : typing.Optional[float] - - max_tokens : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - avoid_repetition : 
typing.Optional[bool] - - max_search_urls : typing.Optional[int] - - enable_crosslinks : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SeoSummaryPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.create_a_perfect_seo_optimized_title_paragraph.seo_summary( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/SEOSummary/", - method="POST", - json={ - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, - "selected_model": selected_model, - "sampling_temperature": sampling_temperature, - "max_tokens": max_tokens, - "num_outputs": num_outputs, - "quality": quality, - "avoid_repetition": avoid_repetition, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SeoSummaryPageResponse, parse_obj_as(type_=SeoSummaryPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_seo_summary( - self, - *, - search_query: str, - keywords: str, - title: str, - company_url: str, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - enable_html: typing.Optional[bool] = OMIT, - selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - enable_crosslinks: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - keywords : str - - title : str - - company_url : str - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : 
typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : typing.Optional[str] - - enable_html : typing.Optional[bool] - - selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] - - sampling_temperature : typing.Optional[float] - - max_tokens : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - avoid_repetition : typing.Optional[bool] - - max_search_urls : typing.Optional[int] - - enable_crosslinks : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async/", - method="POST", - json={ - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, - "selected_model": selected_model, - "sampling_temperature": sampling_temperature, - "max_tokens": max_tokens, - "num_outputs": num_outputs, - "quality": quality, - "avoid_repetition": avoid_repetition, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: 
- if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_seo_summary( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SeoSummaryPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SeoSummaryPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SEOSummary/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SeoSummaryPageStatusResponse, parse_obj_as(type_=SeoSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCreateAPerfectSeoOptimizedTitleParagraphClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def seo_summary( - self, - *, - search_query: str, - keywords: str, - title: str, - company_url: str, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = 
OMIT, - enable_html: typing.Optional[bool] = OMIT, - selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - enable_crosslinks: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> SeoSummaryPageResponse: - """ - Parameters - ---------- - search_query : str - - keywords : str - - title : str - - company_url : str - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : typing.Optional[str] - - enable_html : typing.Optional[bool] - - selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] - - sampling_temperature : typing.Optional[float] - - max_tokens : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - avoid_repetition : typing.Optional[bool] - - max_search_urls : typing.Optional[int] - - enable_crosslinks : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SeoSummaryPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.create_a_perfect_seo_optimized_title_paragraph.seo_summary( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/SEOSummary/", - method="POST", - json={ - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, - "selected_model": selected_model, - "sampling_temperature": sampling_temperature, - "max_tokens": max_tokens, - "num_outputs": num_outputs, - "quality": quality, - "avoid_repetition": avoid_repetition, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SeoSummaryPageResponse, parse_obj_as(type_=SeoSummaryPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_seo_summary( - self, - *, - search_query: str, - keywords: str, - title: str, - company_url: str, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - enable_html: typing.Optional[bool] = OMIT, - selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - enable_crosslinks: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - keywords : str - - title : str - - company_url : str - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : 
typing.Optional[str] - - enable_html : typing.Optional[bool] - - selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] - - sampling_temperature : typing.Optional[float] - - max_tokens : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - avoid_repetition : typing.Optional[bool] - - max_search_urls : typing.Optional[int] - - enable_crosslinks : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async/", - method="POST", - json={ - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, - "selected_model": selected_model, - "sampling_temperature": sampling_temperature, - "max_tokens": max_tokens, - "num_outputs": num_outputs, - "quality": quality, - "avoid_repetition": avoid_repetition, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return 
typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_seo_summary( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SeoSummaryPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SeoSummaryPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SEOSummary/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SeoSummaryPageStatusResponse, parse_obj_as(type_=SeoSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/edit_an_image_with_ai_prompt/__init__.py b/src/gooey/edit_an_image_with_ai_prompt/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/edit_an_image_with_ai_prompt/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/edit_an_image_with_ai_prompt/client.py b/src/gooey/edit_an_image_with_ai_prompt/client.py deleted file mode 100644 index 6810878..0000000 --- a/src/gooey/edit_an_image_with_ai_prompt/client.py +++ /dev/null @@ -1,670 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel -from ..types.img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel -from ..types.img2img_page_response import Img2ImgPageResponse -from ..types.img2img_page_status_response import Img2ImgPageStatusResponse -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class EditAnImageWithAiPromptClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def img2img( - self, - *, - input_image: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - text_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> Img2ImgPageResponse: - """ - Parameters - ---------- - input_image : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - text_prompt : typing.Optional[str] - - selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - 
seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - Img2ImgPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.edit_an_image_with_ai_prompt.img2img( - input_image="input_image", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/Img2Img/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Img2ImgPageResponse, parse_obj_as(type_=Img2ImgPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: 
- raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_img2img( - self, - *, - input_image: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - text_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - input_image : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - text_prompt : typing.Optional[str] - - selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : 
typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.edit_an_image_with_ai_prompt.async_img2img( - input_image="input_image", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/Img2Img/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_img2img( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> Img2ImgPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - Img2ImgPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.edit_an_image_with_ai_prompt.status_img2img( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/Img2Img/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Img2ImgPageStatusResponse, parse_obj_as(type_=Img2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise 
ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncEditAnImageWithAiPromptClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def img2img( - self, - *, - input_image: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - text_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> Img2ImgPageResponse: - """ - Parameters - ---------- - input_image : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - text_prompt : typing.Optional[str] - - selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale 
: typing.Optional[float] - - prompt_strength : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - Img2ImgPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.edit_an_image_with_ai_prompt.img2img( - input_image="input_image", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/Img2Img/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Img2ImgPageResponse, parse_obj_as(type_=Img2ImgPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_img2img( - self, - *, - input_image: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - text_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - input_image : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - text_prompt : typing.Optional[str] - - selected_model : 
typing.Optional[Img2ImgPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.edit_an_image_with_ai_prompt.async_img2img( - input_image="input_image", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/Img2Img/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, 
parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_img2img( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> Img2ImgPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - Img2ImgPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.edit_an_image_with_ai_prompt.status_img2img( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/Img2Img/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Img2ImgPageStatusResponse, parse_obj_as(type_=Img2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/embeddings/client.py b/src/gooey/embeddings/client.py index 97b4b43..84f0604 100644 --- a/src/gooey/embeddings/client.py +++ b/src/gooey/embeddings/client.py @@ -5,234 +5,39 @@ from ..core.api_error import ApiError from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from 
..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel -from ..types.embeddings_page_response import EmbeddingsPageResponse -from ..types.embeddings_page_status_response import EmbeddingsPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) class EmbeddingsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def post( - self, - *, - texts: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> EmbeddingsPageResponse: + def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ Parameters ---------- - texts : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - EmbeddingsPageResponse - Successful Response + None Examples -------- from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) - client.embeddings.post( - texts=["texts"], - ) + client.embeddings.post() """ _response = self._client_wrapper.httpx_client.request( - "v2/embeddings/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, + "v2/embeddings/", method="POST", request_options=request_options ) try: if 200 <= _response.status_code < 300: - return typing.cast(EmbeddingsPageResponse, parse_obj_as(type_=EmbeddingsPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_embeddings( - self, - *, - texts: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: 
typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - texts : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.embeddings.async_embeddings( - texts=["texts"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/embeddings/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = 
_response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_embeddings( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> EmbeddingsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EmbeddingsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.embeddings.status_embeddings( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/embeddings/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(EmbeddingsPageStatusResponse, parse_obj_as(type_=EmbeddingsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + return _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -243,195 +48,16 @@ class AsyncEmbeddingsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def post( - self, - *, - texts: typing.Sequence[str], - 
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> EmbeddingsPageResponse: - """ - Parameters - ---------- - texts : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EmbeddingsPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.embeddings.post( - texts=["texts"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/embeddings/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(EmbeddingsPageResponse, parse_obj_as(type_=EmbeddingsPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # 
type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_embeddings( - self, - *, - texts: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: + async def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ Parameters ---------- - texts : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.embeddings.async_embeddings( - texts=["texts"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/embeddings/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_embeddings( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> EmbeddingsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - EmbeddingsPageStatusResponse - Successful Response + None Examples -------- @@ -440,37 +66,22 @@ async def status_embeddings( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) async def main() -> None: - await client.embeddings.status_embeddings( - run_id="run_id", - ) + await client.embeddings.post() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/embeddings/status/", method="GET", params={"run_id": run_id}, request_options=request_options + "v2/embeddings/", method="POST", request_options=request_options ) try: if 200 <= _response.status_code < 300: - return typing.cast(EmbeddingsPageStatusResponse, parse_obj_as(type_=EmbeddingsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + return _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/gooey/errors/__init__.py b/src/gooey/errors/__init__.py index 80b3d3e..19ea9c4 100644 --- a/src/gooey/errors/__init__.py +++ b/src/gooey/errors/__init__.py @@ -1,8 +1,7 @@ # This file was auto-generated by Fern from our API Definition. 
-from .internal_server_error import InternalServerError from .payment_required_error import PaymentRequiredError from .too_many_requests_error import TooManyRequestsError from .unprocessable_entity_error import UnprocessableEntityError -__all__ = ["InternalServerError", "PaymentRequiredError", "TooManyRequestsError", "UnprocessableEntityError"] +__all__ = ["PaymentRequiredError", "TooManyRequestsError", "UnprocessableEntityError"] diff --git a/src/gooey/errors/internal_server_error.py b/src/gooey/errors/internal_server_error.py deleted file mode 100644 index 3be52c0..0000000 --- a/src/gooey/errors/internal_server_error.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.api_error import ApiError -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 - - -class InternalServerError(ApiError): - def __init__(self, body: FailedReponseModelV2): - super().__init__(status_code=500, body=body) diff --git a/src/gooey/evaluator/__init__.py b/src/gooey/evaluator/__init__.py index f3ea265..7ceefb0 100644 --- a/src/gooey/evaluator/__init__.py +++ b/src/gooey/evaluator/__init__.py @@ -1,2 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
+from .types import BulkEvalPageRequestResponseFormatType, BulkEvalPageRequestSelectedModel + +__all__ = ["BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel"] diff --git a/src/gooey/evaluator/client.py b/src/gooey/evaluator/client.py index bf4c9a6..7731555 100644 --- a/src/gooey/evaluator/client.py +++ b/src/gooey/evaluator/client.py @@ -7,21 +7,18 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError from ..errors.payment_required_error import PaymentRequiredError from ..errors.too_many_requests_error import TooManyRequestsError from ..errors.unprocessable_entity_error import UnprocessableEntityError from ..types.agg_function import AggFunction -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel from ..types.bulk_eval_page_response import BulkEvalPageResponse -from ..types.bulk_eval_page_status_response import BulkEvalPageStatusResponse from ..types.eval_prompt import EvalPrompt -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 from ..types.generic_error_response import GenericErrorResponse from ..types.http_validation_error import HttpValidationError from ..types.recipe_function import RecipeFunction from ..types.run_settings import RunSettings +from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType +from .types.bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -31,20 +28,22 @@ class EvaluatorClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def bulk_eval( + def async_bulk_eval( self, *, documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT, + agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT, selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT, - agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT, + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None ) -> BulkEvalPageResponse: @@ -52,130 +51,29 @@ def bulk_eval( Parameters ---------- documents : typing.Sequence[str] + Upload or link to a CSV or google sheet that contains your sample input data. For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. Remember to includes header names in your CSV too. 
+ + example_id : typing.Optional[str] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Any]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]] + Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. _The `columns` dictionary can be used to reference the spreadsheet columns._ agg_functions : typing.Optional[typing.Sequence[AggFunction]] - Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - BulkEvalPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.evaluator.bulk_eval( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/bulk-eval/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "documents": documents, - "eval_prompts": eval_prompts, - "agg_functions": agg_functions, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(BulkEvalPageResponse, parse_obj_as(type_=BulkEvalPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_bulk_eval( - self, - *, - documents: typing.Sequence[str], - 
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT, - agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - documents : typing.Sequence[str] - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - functions : typing.Optional[typing.Sequence[RecipeFunction]] + Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] @@ -189,13 +87,7 @@ def async_bulk_eval( sampling_temperature : typing.Optional[float] - eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]] - Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. - _The `columns` dictionary can be used to reference the spreadsheet columns._ - - - agg_functions : typing.Optional[typing.Sequence[AggFunction]] - Aggregate using one or more operations. 
Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). + response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -204,7 +96,7 @@ def async_bulk_eval( Returns ------- - AsyncApiResponseModelV3 + BulkEvalPageResponse Successful Response Examples @@ -212,7 +104,6 @@ def async_bulk_eval( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) client.evaluator.async_bulk_eval( @@ -220,20 +111,22 @@ def async_bulk_eval( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async/", + "v3/bulk-eval/async", method="POST", + params={"example_id": example_id}, json={ "functions": functions, "variables": variables, + "documents": documents, + "eval_prompts": eval_prompts, + "agg_functions": agg_functions, "selected_model": selected_model, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, "max_tokens": max_tokens, "sampling_temperature": sampling_temperature, - "documents": documents, - "eval_prompts": eval_prompts, - "agg_functions": agg_functions, + "response_format_type": response_format_type, "settings": settings, }, request_options=request_options, @@ -241,58 +134,7 @@ def async_bulk_eval( ) try: if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_bulk_eval( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> BulkEvalPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - BulkEvalPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.evaluator.status_bulk_eval( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-eval/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(BulkEvalPageStatusResponse, parse_obj_as(type_=BulkEvalPageStatusResponse, object_=_response.json())) # type: ignore + return typing.cast(BulkEvalPageResponse, parse_obj_as(type_=BulkEvalPageResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore @@ -315,20 +157,22 @@ class AsyncEvaluatorClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def bulk_eval( + async def async_bulk_eval( self, *, documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, + eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT, + agg_functions: 
typing.Optional[typing.Sequence[AggFunction]] = OMIT, selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT, - agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT, + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None ) -> BulkEvalPageResponse: @@ -336,138 +180,29 @@ async def bulk_eval( Parameters ---------- documents : typing.Sequence[str] + Upload or link to a CSV or google sheet that contains your sample input data. For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. Remember to includes header names in your CSV too. + + example_id : typing.Optional[str] + functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Any]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]] + Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. _The `columns` dictionary can be used to reference the spreadsheet columns._ agg_functions : typing.Optional[typing.Sequence[AggFunction]] - Aggregate using one or more operations. 
Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - BulkEvalPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.evaluator.bulk_eval( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/bulk-eval/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "documents": documents, - "eval_prompts": eval_prompts, - "agg_functions": agg_functions, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(BulkEvalPageResponse, parse_obj_as(type_=BulkEvalPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, 
parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_bulk_eval( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT, - agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - documents : typing.Sequence[str] - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - functions : typing.Optional[typing.Sequence[RecipeFunction]] + Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). 
- variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] @@ -481,13 +216,7 @@ async def async_bulk_eval( sampling_temperature : typing.Optional[float] - eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]] - Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. - _The `columns` dictionary can be used to reference the spreadsheet columns._ - - - agg_functions : typing.Optional[typing.Sequence[AggFunction]] - Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). + response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -496,7 +225,7 @@ async def async_bulk_eval( Returns ------- - AsyncApiResponseModelV3 + BulkEvalPageResponse Successful Response Examples @@ -506,7 +235,6 @@ async def async_bulk_eval( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) @@ -520,20 +248,22 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async/", + "v3/bulk-eval/async", method="POST", + params={"example_id": example_id}, json={ "functions": functions, "variables": variables, + "documents": documents, + "eval_prompts": eval_prompts, + "agg_functions": agg_functions, "selected_model": selected_model, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, "max_tokens": max_tokens, "sampling_temperature": sampling_temperature, - "documents": documents, - "eval_prompts": eval_prompts, - "agg_functions": agg_functions, + "response_format_type": response_format_type, "settings": 
settings, }, request_options=request_options, @@ -541,66 +271,7 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_bulk_eval( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> BulkEvalPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - BulkEvalPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.evaluator.status_bulk_eval( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-eval/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(BulkEvalPageStatusResponse, parse_obj_as(type_=BulkEvalPageStatusResponse, object_=_response.json())) # type: ignore + return typing.cast(BulkEvalPageResponse, parse_obj_as(type_=BulkEvalPageResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore diff --git a/src/gooey/evaluator/types/__init__.py b/src/gooey/evaluator/types/__init__.py new file mode 100644 index 0000000..67f1384 --- /dev/null +++ b/src/gooey/evaluator/types/__init__.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType +from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel + +__all__ = ["BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel"] diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py b/src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py new file mode 100644 index 0000000..f1c242f --- /dev/null +++ b/src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +BulkEvalPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/bulk_eval_page_request_selected_model.py b/src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py similarity index 79% rename from src/gooey/types/bulk_eval_page_request_selected_model.py rename to src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py index 6175087..853cf33 100644 --- a/src/gooey/types/bulk_eval_page_request_selected_model.py +++ b/src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py @@ -5,6 +5,8 @@ BulkEvalPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git a/src/gooey/functions/client.py b/src/gooey/functions/client.py index 055d842..d15f376 100644 --- a/src/gooey/functions/client.py +++ b/src/gooey/functions/client.py @@ -7,14 +7,10 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError from ..errors.payment_required_error import PaymentRequiredError from ..errors.too_many_requests_error import TooManyRequestsError from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 
-from ..types.failed_reponse_model_v2 import FailedReponseModelV2 from ..types.functions_page_response import FunctionsPageResponse -from ..types.functions_page_status_response import FunctionsPageStatusResponse from ..types.generic_error_response import GenericErrorResponse from ..types.http_validation_error import HttpValidationError from ..types.run_settings import RunSettings @@ -27,9 +23,10 @@ class FunctionsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def post( + def async_functions( self, *, + example_id: typing.Optional[str] = None, code: typing.Optional[str] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, settings: typing.Optional[RunSettings] = OMIT, @@ -38,74 +35,8 @@ def post( """ Parameters ---------- - code : typing.Optional[str] - The JS code to be executed. - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used in the code - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - FunctionsPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.functions.post() - """ - _response = self._client_wrapper.httpx_client.request( - "v2/functions/", - method="POST", - json={"code": code, "variables": variables, "settings": settings}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(FunctionsPageResponse, parse_obj_as(type_=FunctionsPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + example_id : typing.Optional[str] - def async_functions( - self, - *, - code: typing.Optional[str] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- code : typing.Optional[str] The JS code to be executed. 
@@ -119,7 +50,7 @@ def async_functions( Returns ------- - AsyncApiResponseModelV3 + FunctionsPageResponse Successful Response Examples @@ -127,21 +58,21 @@ def async_functions( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) client.functions.async_functions() """ _response = self._client_wrapper.httpx_client.request( - "v3/functions/async/", + "v3/functions/async", method="POST", + params={"example_id": example_id}, json={"code": code, "variables": variables, "settings": settings}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore + return typing.cast(FunctionsPageResponse, parse_obj_as(type_=FunctionsPageResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore @@ -159,52 +90,32 @@ def async_functions( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def status_functions( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> FunctionsPageStatusResponse: + def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ Parameters ---------- - run_id : str - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - FunctionsPageStatusResponse - Successful Response + None Examples -------- from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) - client.functions.status_functions( - run_id="run_id", - ) + client.functions.post() """ _response = self._client_wrapper.httpx_client.request( - "v3/functions/status/", method="GET", params={"run_id": run_id}, request_options=request_options + "v2/functions/", method="POST", request_options=request_options ) try: if 200 <= _response.status_code < 300: - return typing.cast(FunctionsPageStatusResponse, parse_obj_as(type_=FunctionsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + return _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -215,9 +126,10 @@ class AsyncFunctionsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def post( + async def async_functions( self, *, + example_id: typing.Optional[str] = None, code: typing.Optional[str] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, settings: typing.Optional[RunSettings] = OMIT, @@ -226,82 +138,8 @@ async def post( """ Parameters ---------- - code : typing.Optional[str] - The JS code to be executed. 
+ example_id : typing.Optional[str] - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used in the code - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FunctionsPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.functions.post() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/functions/", - method="POST", - json={"code": code, "variables": variables, "settings": settings}, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(FunctionsPageResponse, parse_obj_as(type_=FunctionsPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_functions( - self, - *, - code: 
typing.Optional[str] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- code : typing.Optional[str] The JS code to be executed. @@ -315,7 +153,7 @@ async def async_functions( Returns ------- - AsyncApiResponseModelV3 + FunctionsPageResponse Successful Response Examples @@ -325,7 +163,6 @@ async def async_functions( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) @@ -337,15 +174,16 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/functions/async/", + "v3/functions/async", method="POST", + params={"example_id": example_id}, json={"code": code, "variables": variables, "settings": settings}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore + return typing.cast(FunctionsPageResponse, parse_obj_as(type_=FunctionsPageResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore @@ -363,21 +201,16 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def status_functions( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> FunctionsPageStatusResponse: + async def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ Parameters ---------- - run_id : str - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - FunctionsPageStatusResponse - Successful Response + None Examples -------- @@ -386,37 +219,22 @@ async def status_functions( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) async def main() -> None: - await client.functions.status_functions( - run_id="run_id", - ) + await client.functions.post() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/functions/status/", method="GET", params={"run_id": run_id}, request_options=request_options + "v2/functions/", method="POST", request_options=request_options ) try: if 200 <= _response.status_code < 300: - return typing.cast(FunctionsPageStatusResponse, parse_obj_as(type_=FunctionsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + return _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/gooey/generate_people_also_ask_seo_content/__init__.py b/src/gooey/generate_people_also_ask_seo_content/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/generate_people_also_ask_seo_content/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/generate_people_also_ask_seo_content/client.py b/src/gooey/generate_people_also_ask_seo_content/client.py deleted file mode 100644 index ecbdf1e..0000000 --- a/src/gooey/generate_people_also_ask_seo_content/client.py +++ /dev/null @@ -1,788 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel -from ..types.related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel -from ..types.related_qn_a_page_response import RelatedQnAPageResponse -from ..types.related_qn_a_page_status_response import RelatedQnAPageStatusResponse -from ..types.run_settings import RunSettings -from ..types.serp_search_location import SerpSearchLocation -from ..types.serp_search_type import SerpSearchType - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class GeneratePeopleAlsoAskSeoContentClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def related_qna_maker( - self, - *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnAPageResponse: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] 
- - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnAPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.generate_people_also_ask_seo_content.related_qna_maker( - search_query="search_query", - site_filter="site_filter", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/related-qna-maker/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnAPageResponse, parse_obj_as(type_=RelatedQnAPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_related_qna_maker( - self, - *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : 
typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.generate_people_also_ask_seo_content.async_related_qna_maker( - search_query="search_query", - site_filter="site_filter", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_related_qna_maker( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnAPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - RelatedQnAPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.generate_people_also_ask_seo_content.status_related_qna_maker( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnAPageStatusResponse, parse_obj_as(type_=RelatedQnAPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, 
body=_response_json) - - -class AsyncGeneratePeopleAlsoAskSeoContentClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def related_qna_maker( - self, - *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnAPageResponse: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : 
typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnAPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.generate_people_also_ask_seo_content.related_qna_maker( - search_query="search_query", - site_filter="site_filter", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/related-qna-maker/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnAPageResponse, parse_obj_as(type_=RelatedQnAPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if 
_response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_related_qna_maker( - self, - *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - site_filter : 
str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.generate_people_also_ask_seo_content.async_related_qna_maker( - search_query="search_query", - site_filter="site_filter", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if 
_response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_related_qna_maker( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnAPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - RelatedQnAPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.generate_people_also_ask_seo_content.status_related_qna_maker( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnAPageStatusResponse, parse_obj_as(type_=RelatedQnAPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) 
- _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/generate_product_photo_backgrounds/__init__.py b/src/gooey/generate_product_photo_backgrounds/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/generate_product_photo_backgrounds/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/generate_product_photo_backgrounds/client.py b/src/gooey/generate_product_photo_backgrounds/client.py deleted file mode 100644 index 8929204..0000000 --- a/src/gooey/generate_product_photo_backgrounds/client.py +++ /dev/null @@ -1,689 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel -from ..types.object_inpainting_page_response import ObjectInpaintingPageResponse -from ..types.object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse -from 
..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class GenerateProductPhotoBackgroundsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def object_inpainting( - self, - *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> ObjectInpaintingPageResponse: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - mask_threshold : typing.Optional[float] - - selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] 
- - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ObjectInpaintingPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.generate_product_photo_backgrounds.object_inpainting( - input_image="input_image", - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/ObjectInpainting/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ObjectInpaintingPageResponse, parse_obj_as(type_=ObjectInpaintingPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_object_inpainting( - self, - *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - mask_threshold : typing.Optional[float] - - selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] - - 
negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.generate_product_photo_backgrounds.async_object_inpainting( - input_image="input_image", - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_object_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ObjectInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ObjectInpaintingPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.generate_product_photo_backgrounds.status_object_inpainting( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ObjectInpaintingPageStatusResponse, parse_obj_as(type_=ObjectInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = 
_response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncGenerateProductPhotoBackgroundsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def object_inpainting( - self, - *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> ObjectInpaintingPageResponse: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - mask_threshold : typing.Optional[float] - - selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : 
typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ObjectInpaintingPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.generate_product_photo_backgrounds.object_inpainting( - input_image="input_image", - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/ObjectInpainting/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ObjectInpaintingPageResponse, parse_obj_as(type_=ObjectInpaintingPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - 
if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_object_inpainting( - self, - *, - input_image: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - input_image : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - mask_threshold : 
typing.Optional[float] - - selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.generate_product_photo_backgrounds.async_object_inpainting( - input_image="input_image", - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_object_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ObjectInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ObjectInpaintingPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.generate_product_photo_backgrounds.status_object_inpainting( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ObjectInpaintingPageStatusResponse, parse_obj_as(type_=ObjectInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, 
parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/large_language_models_gpt3/__init__.py b/src/gooey/large_language_models_gpt3/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/large_language_models_gpt3/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/large_language_models_gpt3/client.py b/src/gooey/large_language_models_gpt3/client.py deleted file mode 100644 index 55cfdd3..0000000 --- a/src/gooey/large_language_models_gpt3/client.py +++ /dev/null @@ -1,566 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType -from ..types.compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem -from ..types.compare_llm_page_response import CompareLlmPageResponse -from ..types.compare_llm_page_status_response import CompareLlmPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class LargeLanguageModelsGpt3Client: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def compare_llm( - self, - *, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareLlmPageResponse: - """ - Parameters - ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareLlmPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.large_language_models_gpt3.compare_llm() - """ - _response = self._client_wrapper.httpx_client.request( - "v2/CompareLLM/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "selected_models": selected_models, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareLlmPageResponse, parse_obj_as(type_=CompareLlmPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_compare_llm( - self, - *, - functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.large_language_models_gpt3.async_compare_llm() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "selected_models": selected_models, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_compare_llm( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareLlmPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific 
configuration. - - Returns - ------- - CompareLlmPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.large_language_models_gpt3.status_compare_llm( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/CompareLLM/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareLlmPageStatusResponse, parse_obj_as(type_=CompareLlmPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncLargeLanguageModelsGpt3Client: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def compare_llm( - self, - *, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: 
typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareLlmPageResponse: - """ - Parameters - ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareLlmPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.large_language_models_gpt3.compare_llm() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/CompareLLM/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "selected_models": selected_models, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareLlmPageResponse, parse_obj_as(type_=CompareLlmPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, 
body=_response_json) - - async def async_compare_llm( - self, - *, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.large_language_models_gpt3.async_compare_llm() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "selected_models": selected_models, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_compare_llm( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareLlmPageStatusResponse: - """ - 
Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CompareLlmPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.large_language_models_gpt3.status_compare_llm( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareLLM/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareLlmPageStatusResponse, parse_obj_as(type_=CompareLlmPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/letter_writer/__init__.py b/src/gooey/letter_writer/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/letter_writer/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/letter_writer/client.py b/src/gooey/letter_writer/client.py deleted file mode 100644 index ec340f7..0000000 --- a/src/gooey/letter_writer/client.py +++ /dev/null @@ -1,669 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.letter_writer_page_response import LetterWriterPageResponse -from ..types.letter_writer_page_status_response import LetterWriterPageStatusResponse -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from ..types.training_data_model import TrainingDataModel - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class LetterWriterClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def letter_writer( - self, - *, - action_id: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - prompt_header: typing.Optional[str] = OMIT, - example_letters: typing.Optional[typing.Sequence[TrainingDataModel]] = OMIT, - lm_selected_api: typing.Optional[str] = OMIT, - lm_selected_engine: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - lm_sampling_temperature: typing.Optional[float] = OMIT, - api_http_method: typing.Optional[str] = OMIT, - api_url: typing.Optional[str] = OMIT, - api_headers: typing.Optional[str] = OMIT, - api_json_body: typing.Optional[str] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - strip_html2text: typing.Optional[bool] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> LetterWriterPageResponse: - """ - Parameters - ---------- - action_id : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - prompt_header : typing.Optional[str] - - example_letters : typing.Optional[typing.Sequence[TrainingDataModel]] - - lm_selected_api : typing.Optional[str] - - lm_selected_engine : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - lm_sampling_temperature : typing.Optional[float] - - api_http_method : typing.Optional[str] - - api_url : typing.Optional[str] - - api_headers : typing.Optional[str] - - api_json_body : typing.Optional[str] - - input_prompt : typing.Optional[str] - - strip_html2text : typing.Optional[bool] - - settings : typing.Optional[RunSettings] - - request_options : 
typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LetterWriterPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.letter_writer.letter_writer( - action_id="action_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/LetterWriter/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "action_id": action_id, - "prompt_header": prompt_header, - "example_letters": example_letters, - "lm_selected_api": lm_selected_api, - "lm_selected_engine": lm_selected_engine, - "num_outputs": num_outputs, - "quality": quality, - "lm_sampling_temperature": lm_sampling_temperature, - "api_http_method": api_http_method, - "api_url": api_url, - "api_headers": api_headers, - "api_json_body": api_json_body, - "input_prompt": input_prompt, - "strip_html_2_text": strip_html2text, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LetterWriterPageResponse, parse_obj_as(type_=LetterWriterPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json 
= _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_letter_writer( - self, - *, - action_id: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - prompt_header: typing.Optional[str] = OMIT, - example_letters: typing.Optional[typing.Sequence[TrainingDataModel]] = OMIT, - lm_selected_api: typing.Optional[str] = OMIT, - lm_selected_engine: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - lm_sampling_temperature: typing.Optional[float] = OMIT, - api_http_method: typing.Optional[str] = OMIT, - api_url: typing.Optional[str] = OMIT, - api_headers: typing.Optional[str] = OMIT, - api_json_body: typing.Optional[str] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - strip_html2text: typing.Optional[bool] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - action_id : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - prompt_header : typing.Optional[str] - - example_letters : typing.Optional[typing.Sequence[TrainingDataModel]] - - lm_selected_api : typing.Optional[str] - - lm_selected_engine : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - lm_sampling_temperature : typing.Optional[float] - - api_http_method : typing.Optional[str] - - api_url : typing.Optional[str] - - api_headers : typing.Optional[str] - - api_json_body : typing.Optional[str] - - input_prompt : typing.Optional[str] - - strip_html2text : typing.Optional[bool] - 
- settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.letter_writer.async_letter_writer( - action_id="action_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/LetterWriter/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "action_id": action_id, - "prompt_header": prompt_header, - "example_letters": example_letters, - "lm_selected_api": lm_selected_api, - "lm_selected_engine": lm_selected_engine, - "num_outputs": num_outputs, - "quality": quality, - "lm_sampling_temperature": lm_sampling_temperature, - "api_http_method": api_http_method, - "api_url": api_url, - "api_headers": api_headers, - "api_json_body": api_json_body, - "input_prompt": input_prompt, - "strip_html_2_text": strip_html2text, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, 
body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_letter_writer( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LetterWriterPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LetterWriterPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.letter_writer.status_letter_writer( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/LetterWriter/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LetterWriterPageStatusResponse, parse_obj_as(type_=LetterWriterPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncLetterWriterClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def letter_writer( - self, - *, - action_id: str, - functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - prompt_header: typing.Optional[str] = OMIT, - example_letters: typing.Optional[typing.Sequence[TrainingDataModel]] = OMIT, - lm_selected_api: typing.Optional[str] = OMIT, - lm_selected_engine: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - lm_sampling_temperature: typing.Optional[float] = OMIT, - api_http_method: typing.Optional[str] = OMIT, - api_url: typing.Optional[str] = OMIT, - api_headers: typing.Optional[str] = OMIT, - api_json_body: typing.Optional[str] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - strip_html2text: typing.Optional[bool] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> LetterWriterPageResponse: - """ - Parameters - ---------- - action_id : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - prompt_header : typing.Optional[str] - - example_letters : typing.Optional[typing.Sequence[TrainingDataModel]] - - lm_selected_api : typing.Optional[str] - - lm_selected_engine : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - lm_sampling_temperature : typing.Optional[float] - - api_http_method : typing.Optional[str] - - api_url : typing.Optional[str] - - api_headers : typing.Optional[str] - - api_json_body : typing.Optional[str] - - input_prompt : typing.Optional[str] - - strip_html2text : typing.Optional[bool] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LetterWriterPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.letter_writer.letter_writer( - action_id="action_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/LetterWriter/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "action_id": action_id, - "prompt_header": prompt_header, - "example_letters": example_letters, - "lm_selected_api": lm_selected_api, - "lm_selected_engine": lm_selected_engine, - "num_outputs": num_outputs, - "quality": quality, - "lm_sampling_temperature": lm_sampling_temperature, - "api_http_method": api_http_method, - "api_url": api_url, - "api_headers": api_headers, - "api_json_body": api_json_body, - "input_prompt": input_prompt, - "strip_html_2_text": strip_html2text, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LetterWriterPageResponse, parse_obj_as(type_=LetterWriterPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # 
type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_letter_writer( - self, - *, - action_id: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - prompt_header: typing.Optional[str] = OMIT, - example_letters: typing.Optional[typing.Sequence[TrainingDataModel]] = OMIT, - lm_selected_api: typing.Optional[str] = OMIT, - lm_selected_engine: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - lm_sampling_temperature: typing.Optional[float] = OMIT, - api_http_method: typing.Optional[str] = OMIT, - api_url: typing.Optional[str] = OMIT, - api_headers: typing.Optional[str] = OMIT, - api_json_body: typing.Optional[str] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - strip_html2text: typing.Optional[bool] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - action_id : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - prompt_header : typing.Optional[str] - - example_letters : typing.Optional[typing.Sequence[TrainingDataModel]] - - lm_selected_api : typing.Optional[str] - - lm_selected_engine : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - lm_sampling_temperature : typing.Optional[float] - - api_http_method : typing.Optional[str] - - api_url : typing.Optional[str] - - api_headers : typing.Optional[str] - - api_json_body : typing.Optional[str] - - input_prompt : typing.Optional[str] - - 
strip_html2text : typing.Optional[bool] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.letter_writer.async_letter_writer( - action_id="action_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/LetterWriter/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "action_id": action_id, - "prompt_header": prompt_header, - "example_letters": example_letters, - "lm_selected_api": lm_selected_api, - "lm_selected_engine": lm_selected_engine, - "num_outputs": num_outputs, - "quality": quality, - "lm_sampling_temperature": lm_sampling_temperature, - "api_http_method": api_http_method, - "api_url": api_url, - "api_headers": api_headers, - "api_json_body": api_json_body, - "input_prompt": input_prompt, - "strip_html_2_text": strip_html2text, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: 
ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_letter_writer( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LetterWriterPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LetterWriterPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.letter_writer.status_letter_writer( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/LetterWriter/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LetterWriterPageStatusResponse, parse_obj_as(type_=LetterWriterPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, 
body=_response_json) diff --git a/src/gooey/lip_syncing/__init__.py b/src/gooey/lip_syncing/__init__.py index f3ea265..4d094b1 100644 --- a/src/gooey/lip_syncing/__init__.py +++ b/src/gooey/lip_syncing/__init__.py @@ -1,2 +1,5 @@ # This file was auto-generated by Fern from our API Definition. +from .types import LipsyncPageRequestSelectedModel + +__all__ = ["LipsyncPageRequestSelectedModel"] diff --git a/src/gooey/lip_syncing/client.py b/src/gooey/lip_syncing/client.py index 31872ed..2e99a69 100644 --- a/src/gooey/lip_syncing/client.py +++ b/src/gooey/lip_syncing/client.py @@ -7,20 +7,16 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError from ..errors.payment_required_error import PaymentRequiredError from ..errors.too_many_requests_error import TooManyRequestsError from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 from ..types.generic_error_response import GenericErrorResponse from ..types.http_validation_error import HttpValidationError -from ..types.lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel from ..types.lipsync_page_response import LipsyncPageResponse -from ..types.lipsync_page_status_response import LipsyncPageStatusResponse from ..types.recipe_function import RecipeFunction from ..types.run_settings import RunSettings from ..types.sad_talker_settings import SadTalkerSettings +from .types.lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -30,9 +26,10 @@ class LipSyncingClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def lipsync( + def async_lipsync( self, *, + example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, input_face: typing.Optional[str] = OMIT, @@ -49,109 +46,8 @@ def lipsync( """ Parameters ---------- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncPageRequestSelectedModel] + example_id : typing.Optional[str] - input_audio : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.lip_syncing.lipsync() - """ - _response = self._client_wrapper.httpx_client.request( - "v2/Lipsync/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "input_audio": input_audio, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncPageResponse, parse_obj_as(type_=LipsyncPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_lipsync( - self, - *, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = 
OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT, - input_audio: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Any]] @@ -180,7 +76,7 @@ def async_lipsync( Returns ------- - AsyncApiResponseModelV3 + LipsyncPageResponse Successful Response Examples @@ -188,14 +84,14 @@ def async_lipsync( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) client.lip_syncing.async_lipsync() """ _response = self._client_wrapper.httpx_client.request( - "v3/Lipsync/async/", + "v3/Lipsync/async", method="POST", + params={"example_id": example_id}, json={ "functions": functions, "variables": variables, @@ -214,58 +110,7 @@ def async_lipsync( ) try: if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_lipsync( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LipsyncPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.lip_syncing.status_lipsync( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/Lipsync/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncPageStatusResponse, parse_obj_as(type_=LipsyncPageStatusResponse, object_=_response.json())) # type: ignore + return typing.cast(LipsyncPageResponse, parse_obj_as(type_=LipsyncPageResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore @@ -288,9 +133,10 @@ class AsyncLipSyncingClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def lipsync( + async def async_lipsync( self, *, + example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, input_face: typing.Optional[str] = OMIT, @@ -307,117 +153,8 @@ async def lipsync( """ Parameters ---------- - functions : 
typing.Optional[typing.Sequence[RecipeFunction]] + example_id : typing.Optional[str] - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncPageRequestSelectedModel] - - input_audio : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LipsyncPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.lip_syncing.lipsync() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/Lipsync/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "input_audio": input_audio, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncPageResponse, parse_obj_as(type_=LipsyncPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 
422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_lipsync( - self, - *, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT, - input_audio: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- functions : typing.Optional[typing.Sequence[RecipeFunction]] variables : typing.Optional[typing.Dict[str, typing.Any]] @@ -446,7 +183,7 @@ async def async_lipsync( Returns ------- - AsyncApiResponseModelV3 + LipsyncPageResponse Successful Response Examples @@ -456,7 +193,6 @@ async def async_lipsync( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) @@ -468,8 +204,9 @@ async def main() -> None: 
asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/Lipsync/async/", + "v3/Lipsync/async", method="POST", + params={"example_id": example_id}, json={ "functions": functions, "variables": variables, @@ -488,66 +225,7 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_lipsync( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.lip_syncing.status_lipsync( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/Lipsync/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncPageStatusResponse, parse_obj_as(type_=LipsyncPageStatusResponse, object_=_response.json())) # type: ignore + return typing.cast(LipsyncPageResponse, parse_obj_as(type_=LipsyncPageResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore diff --git a/src/gooey/lip_syncing/types/__init__.py b/src/gooey/lip_syncing/types/__init__.py new file mode 100644 index 0000000..e7e3b85 --- /dev/null +++ b/src/gooey/lip_syncing/types/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel + +__all__ = ["LipsyncPageRequestSelectedModel"] diff --git a/src/gooey/types/lipsync_page_request_selected_model.py b/src/gooey/lip_syncing/types/lipsync_page_request_selected_model.py similarity index 100% rename from src/gooey/types/lipsync_page_request_selected_model.py rename to src/gooey/lip_syncing/types/lipsync_page_request_selected_model.py diff --git a/src/gooey/lipsync_video_with_any_text/__init__.py b/src/gooey/lipsync_video_with_any_text/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/lipsync_video_with_any_text/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/lipsync_video_with_any_text/client.py b/src/gooey/lipsync_video_with_any_text/client.py deleted file mode 100644 index 756634c..0000000 --- a/src/gooey/lipsync_video_with_any_text/client.py +++ /dev/null @@ -1,869 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel -from ..types.lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName -from ..types.lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel -from ..types.lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider -from ..types.lipsync_tts_page_response import LipsyncTtsPageResponse -from ..types.lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from ..types.sad_talker_settings import SadTalkerSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class LipsyncVideoWithAnyTextClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def lipsync_tts( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncTtsPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : 
typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncTtsPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.lipsync_video_with_any_text.lipsync_tts( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/LipsyncTTS/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncTtsPageResponse, parse_obj_as(type_=LipsyncTtsPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if 
_response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_lipsync_tts( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, - 
input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - 
sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.lipsync_video_with_any_text.async_lipsync_tts( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return 
typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_lipsync_tts( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncTtsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncTtsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.lipsync_video_with_any_text.status_lipsync_tts( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncTtsPageStatusResponse, parse_obj_as(type_=LipsyncTtsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncLipsyncVideoWithAnyTextClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def lipsync_tts( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - 
google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncTtsPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use 
`elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncTtsPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.lipsync_video_with_any_text.lipsync_tts( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/LipsyncTTS/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncTtsPageResponse, parse_obj_as(type_=LipsyncTtsPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - 
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_lipsync_tts( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: 
typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - 
face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.lipsync_video_with_any_text.async_lipsync_tts( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - 
"sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_lipsync_tts( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncTtsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncTtsPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.lipsync_video_with_any_text.status_lipsync_tts( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncTtsPageStatusResponse, parse_obj_as(type_=LipsyncTtsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/misc/client.py b/src/gooey/misc/client.py index c8f71a1..39675bf 100644 --- a/src/gooey/misc/client.py +++ b/src/gooey/misc/client.py @@ -38,7 +38,6 @@ def get_balance(self, *, request_options: typing.Optional[RequestOptions] = None from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) client.misc.get_balance() @@ -49,10 +48,6 @@ def get_balance(self, *, request_options: typing.Optional[RequestOptions] = None try: if 
200 <= _response.status_code < 300: return typing.cast(BalanceResponse, parse_obj_as(type_=BalanceResponse, object_=_response.json())) # type: ignore - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -109,7 +104,6 @@ def video_bots_broadcast( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) client.misc.video_bots_broadcast( @@ -143,37 +137,6 @@ def video_bots_broadcast( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def health(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Any - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.misc.health() - """ - _response = self._client_wrapper.httpx_client.request(method="GET", request_options=request_options) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - class AsyncMiscClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): @@ -198,7 +161,6 @@ async def get_balance(self, *, request_options: typing.Optional[RequestOptions] from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) @@ -215,10 +177,6 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast(BalanceResponse, parse_obj_as(type_=BalanceResponse, object_=_response.json())) # type: ignore - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -277,7 +235,6 @@ async def video_bots_broadcast( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) @@ -316,42 +273,3 @@ async def main() -> None: except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - - async def health(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - 
Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Any - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.misc.health() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request(method="GET", request_options=request_options) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/people_also_ask_answers_from_a_doc/__init__.py b/src/gooey/people_also_ask_answers_from_a_doc/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/people_also_ask_answers_from_a_doc/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/people_also_ask_answers_from_a_doc/client.py b/src/gooey/people_also_ask_answers_from_a_doc/client.py deleted file mode 100644 index f2588e1..0000000 --- a/src/gooey/people_also_ask_answers_from_a_doc/client.py +++ /dev/null @@ -1,818 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle -from ..types.related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel -from ..types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery -from ..types.related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel -from ..types.related_qn_a_doc_page_response import RelatedQnADocPageResponse -from ..types.related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse -from ..types.run_settings import RunSettings -from ..types.serp_search_location import SerpSearchLocation -from ..types.serp_search_type import SerpSearchType - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class PeopleAlsoAskAnswersFromADocClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def related_qna_maker_doc( - self, - *, - search_query: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnADocPageResponse: - """ - Parameters - ---------- - search_query : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : 
typing.Optional[RelatedQnADocPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnADocPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.people_also_ask_answers_from_a_doc.related_qna_maker_doc( - search_query="search_query", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/related-qna-maker-doc/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "citation_style": citation_style, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnADocPageResponse, parse_obj_as(type_=RelatedQnADocPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - 
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_related_qna_maker_doc( - self, - *, - search_query: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: 
typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc( - search_query="search_query", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "citation_style": citation_style, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - 
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_related_qna_maker_doc( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnADocPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - RelatedQnADocPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnADocPageStatusResponse, parse_obj_as(type_=RelatedQnADocPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncPeopleAlsoAskAnswersFromADocClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def related_qna_maker_doc( - self, - *, - search_query: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnADocPageResponse: - """ - Parameters - ---------- - search_query : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt 
templates and in functions as arguments - - keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnADocPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.people_also_ask_answers_from_a_doc.related_qna_maker_doc( - search_query="search_query", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/related-qna-maker-doc/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "citation_style": citation_style, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnADocPageResponse, parse_obj_as(type_=RelatedQnADocPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_related_qna_maker_doc( - self, - *, - search_query: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - 
scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc( - search_query="search_query", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": 
num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "citation_style": citation_style, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_related_qna_maker_doc( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnADocPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnADocPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnADocPageStatusResponse, parse_obj_as(type_=RelatedQnADocPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py deleted file mode 100644 index 746b656..0000000 --- a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py +++ /dev/null @@ -1,573 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from ..types.social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel -from ..types.social_lookup_email_page_response import SocialLookupEmailPageResponse -from ..types.social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class ProfileLookupGpt3ForAiPersonalizedEmailsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def social_lookup_email( - self, - *, - email_address: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> SocialLookupEmailPageResponse: - """ - Parameters - ---------- - email_address : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] - - num_outputs : typing.Optional[int] - - avoid_repetition : typing.Optional[bool] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SocialLookupEmailPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email( - email_address="email_address", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/SocialLookupEmail/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "input_prompt": input_prompt, - "selected_model": selected_model, - "num_outputs": num_outputs, - "avoid_repetition": avoid_repetition, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SocialLookupEmailPageResponse, parse_obj_as(type_=SocialLookupEmailPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def 
async_social_lookup_email( - self, - *, - email_address: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - email_address : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] - - num_outputs : typing.Optional[int] - - avoid_repetition : typing.Optional[bool] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email( - email_address="email_address", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "input_prompt": input_prompt, - "selected_model": selected_model, - "num_outputs": num_outputs, - "avoid_repetition": avoid_repetition, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_social_lookup_email( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SocialLookupEmailPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - 
request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SocialLookupEmailPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SocialLookupEmailPageStatusResponse, parse_obj_as(type_=SocialLookupEmailPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def social_lookup_email( - self, - *, - email_address: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_model: 
typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> SocialLookupEmailPageResponse: - """ - Parameters - ---------- - email_address : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] - - num_outputs : typing.Optional[int] - - avoid_repetition : typing.Optional[bool] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SocialLookupEmailPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email( - email_address="email_address", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/SocialLookupEmail/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "input_prompt": input_prompt, - "selected_model": selected_model, - "num_outputs": num_outputs, - "avoid_repetition": avoid_repetition, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SocialLookupEmailPageResponse, parse_obj_as(type_=SocialLookupEmailPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, 
body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_social_lookup_email( - self, - *, - email_address: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - email_address : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] - - num_outputs : typing.Optional[int] - - avoid_repetition : typing.Optional[bool] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email( - email_address="email_address", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "input_prompt": input_prompt, - "selected_model": selected_model, - "num_outputs": num_outputs, - "avoid_repetition": avoid_repetition, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_social_lookup_email( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] 
= None - ) -> SocialLookupEmailPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SocialLookupEmailPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SocialLookupEmailPageStatusResponse, parse_obj_as(type_=SocialLookupEmailPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/render_image_search_results_with_ai/__init__.py b/src/gooey/render_image_search_results_with_ai/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/render_image_search_results_with_ai/__init__.py +++ /dev/null 
@@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/render_image_search_results_with_ai/client.py b/src/gooey/render_image_search_results_with_ai/client.py deleted file mode 100644 index 53c65f2..0000000 --- a/src/gooey/render_image_search_results_with_ai/client.py +++ /dev/null @@ -1,662 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel -from ..types.google_image_gen_page_response import GoogleImageGenPageResponse -from ..types.google_image_gen_page_status_response import GoogleImageGenPageStatusResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from ..types.serp_search_location import SerpSearchLocation - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class RenderImageSearchResultsWithAiClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def google_image_gen( - self, - *, - search_query: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> GoogleImageGenPageResponse: - """ - Parameters - ---------- - search_query : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : 
typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - GoogleImageGenPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.render_image_search_results_with_ai.google_image_gen( - search_query="search_query", - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/GoogleImageGen/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, - "text_prompt": text_prompt, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleImageGenPageResponse, parse_obj_as(type_=GoogleImageGenPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, 
parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_google_image_gen( - self, - *, - search_query: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - 
sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.render_image_search_results_with_ai.async_google_image_gen( - search_query="search_query", - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, - "text_prompt": text_prompt, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, 
object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_google_image_gen( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleImageGenPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - GoogleImageGenPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.render_image_search_results_with_ai.status_google_image_gen( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleImageGenPageStatusResponse, parse_obj_as(type_=GoogleImageGenPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class 
AsyncRenderImageSearchResultsWithAiClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def google_image_gen( - self, - *, - search_query: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> GoogleImageGenPageResponse: - """ - Parameters - ---------- - search_query : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : 
typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - GoogleImageGenPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.render_image_search_results_with_ai.google_image_gen( - search_query="search_query", - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/GoogleImageGen/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, - "text_prompt": text_prompt, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleImageGenPageResponse, parse_obj_as(type_=GoogleImageGenPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if 
_response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_google_image_gen( - self, - *, - search_query: str, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - 
- guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.render_image_search_results_with_ai.async_google_image_gen( - search_query="search_query", - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, - "text_prompt": text_prompt, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_google_image_gen( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleImageGenPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - GoogleImageGenPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.render_image_search_results_with_ai.status_google_image_gen( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleImageGenPageStatusResponse, parse_obj_as(type_=GoogleImageGenPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/search_your_docs_with_gpt/__init__.py b/src/gooey/search_your_docs_with_gpt/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/search_your_docs_with_gpt/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/search_your_docs_with_gpt/client.py b/src/gooey/search_your_docs_with_gpt/client.py deleted file mode 100644 index 0bd64e2..0000000 --- a/src/gooey/search_your_docs_with_gpt/client.py +++ /dev/null @@ -1,744 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle -from ..types.doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel -from ..types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery -from ..types.doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel -from ..types.doc_search_page_response import DocSearchPageResponse -from 
..types.doc_search_page_status_response import DocSearchPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class SearchYourDocsWithGptClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def doc_search( - self, - *, - search_query: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> DocSearchPageResponse: - """ - Parameters - ---------- - search_query : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - 
variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSearchPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - citation_style : typing.Optional[DocSearchPageRequestCitationStyle] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DocSearchPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.search_your_docs_with_gpt.doc_search( - search_query="search_query", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/doc-search/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "citation_style": citation_style, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSearchPageResponse, parse_obj_as(type_=DocSearchPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, 
parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_doc_search( - self, - *, - search_query: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - 
max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSearchPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - citation_style : typing.Optional[DocSearchPageRequestCitationStyle] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.search_your_docs_with_gpt.async_doc_search( - search_query="search_query", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-search/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "citation_style": citation_style, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise 
ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_doc_search( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocSearchPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocSearchPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.search_your_docs_with_gpt.status_doc_search( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-search/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSearchPageStatusResponse, parse_obj_as(type_=DocSearchPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncSearchYourDocsWithGptClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def doc_search( - self, - *, 
- search_query: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> DocSearchPageResponse: - """ - Parameters - ---------- - search_query : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. 
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSearchPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - citation_style : typing.Optional[DocSearchPageRequestCitationStyle] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocSearchPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.search_your_docs_with_gpt.doc_search( - search_query="search_query", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/doc-search/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "citation_style": citation_style, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= 
_response.status_code < 300: - return typing.cast(DocSearchPageResponse, parse_obj_as(type_=DocSearchPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_doc_search( - self, - *, - search_query: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: 
typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSearchPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - citation_style : typing.Optional[DocSearchPageRequestCitationStyle] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.search_your_docs_with_gpt.async_doc_search( - search_query="search_query", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-search/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "citation_style": citation_style, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore 
- ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_doc_search( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocSearchPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocSearchPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.search_your_docs_with_gpt.status_doc_search( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-search/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSearchPageStatusResponse, parse_obj_as(type_=DocSearchPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff 
--git a/src/gooey/smart_gpt/__init__.py b/src/gooey/smart_gpt/__init__.py index f3ea265..fce5f3e 100644 --- a/src/gooey/smart_gpt/__init__.py +++ b/src/gooey/smart_gpt/__init__.py @@ -1,2 +1,5 @@ # This file was auto-generated by Fern from our API Definition. +from .types import SmartGptPageRequestResponseFormatType, SmartGptPageRequestSelectedModel + +__all__ = ["SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel"] diff --git a/src/gooey/smart_gpt/client.py b/src/gooey/smart_gpt/client.py index 7d12ad6..426212b 100644 --- a/src/gooey/smart_gpt/client.py +++ b/src/gooey/smart_gpt/client.py @@ -7,19 +7,16 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError from ..errors.payment_required_error import PaymentRequiredError from ..errors.too_many_requests_error import TooManyRequestsError from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 from ..types.generic_error_response import GenericErrorResponse from ..types.http_validation_error import HttpValidationError from ..types.recipe_function import RecipeFunction from ..types.run_settings import RunSettings -from ..types.smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel from ..types.smart_gpt_page_response import SmartGptPageResponse -from ..types.smart_gpt_page_status_response import SmartGptPageStatusResponse +from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType +from .types.smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -29,10 +26,11 @@ class SmartGptClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def post( + def async_smart_gpt( self, *, input_prompt: str, + example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, cot_prompt: typing.Optional[str] = OMIT, @@ -44,6 +42,7 @@ def post( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None ) -> SmartGptPageResponse: @@ -52,118 +51,7 @@ def post( ---------- input_prompt : str - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - cot_prompt : typing.Optional[str] - - reflexion_prompt : typing.Optional[str] - - dera_prompt : typing.Optional[str] - - selected_model : typing.Optional[SmartGptPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SmartGptPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.smart_gpt.post( - input_prompt="input_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/SmartGPT/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "cot_prompt": cot_prompt, - "reflexion_prompt": reflexion_prompt, - "dera_prompt": dera_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SmartGptPageResponse, parse_obj_as(type_=SmartGptPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_smart_gpt( - self, - *, - 
input_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - cot_prompt: typing.Optional[str] = OMIT, - reflexion_prompt: typing.Optional[str] = OMIT, - dera_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - input_prompt : str + example_id : typing.Optional[str] functions : typing.Optional[typing.Sequence[RecipeFunction]] @@ -188,6 +76,8 @@ def async_smart_gpt( sampling_temperature : typing.Optional[float] + response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] + settings : typing.Optional[RunSettings] request_options : typing.Optional[RequestOptions] @@ -195,7 +85,7 @@ def async_smart_gpt( Returns ------- - AsyncApiResponseModelV3 + SmartGptPageResponse Successful Response Examples @@ -203,7 +93,6 @@ def async_smart_gpt( from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) client.smart_gpt.async_smart_gpt( @@ -211,8 +100,9 @@ def async_smart_gpt( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async/", + "v3/SmartGPT/async", method="POST", + params={"example_id": example_id}, json={ "functions": functions, "variables": variables, @@ -226,6 +116,7 @@ def async_smart_gpt( "quality": quality, "max_tokens": max_tokens, "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, "settings": settings, }, request_options=request_options, @@ -233,7 +124,7 @@ def async_smart_gpt( ) 
try: if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore + return typing.cast(SmartGptPageResponse, parse_obj_as(type_=SmartGptPageResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore @@ -251,52 +142,32 @@ def async_smart_gpt( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def status_smart_gpt( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SmartGptPageStatusResponse: + def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ Parameters ---------- - run_id : str - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - SmartGptPageStatusResponse - Successful Response + None Examples -------- from gooey import Gooey client = Gooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) - client.smart_gpt.status_smart_gpt( - run_id="run_id", - ) + client.smart_gpt.post() """ _response = self._client_wrapper.httpx_client.request( - "v3/SmartGPT/status/", method="GET", params={"run_id": run_id}, request_options=request_options + "v2/SmartGPT/", method="POST", request_options=request_options ) try: if 200 <= _response.status_code < 300: - return typing.cast(SmartGptPageStatusResponse, parse_obj_as(type_=SmartGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + return _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -307,10 +178,11 @@ class AsyncSmartGptClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def post( + async def async_smart_gpt( self, *, input_prompt: str, + example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, cot_prompt: typing.Optional[str] = OMIT, @@ -322,6 +194,7 @@ async def post( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None ) -> SmartGptPageResponse: @@ -330,126 +203,7 @@ async def post( ---------- input_prompt : str - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - cot_prompt : typing.Optional[str] - - reflexion_prompt : typing.Optional[str] - - dera_prompt : typing.Optional[str] - - selected_model : typing.Optional[SmartGptPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific 
configuration. - - Returns - ------- - SmartGptPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.smart_gpt.post( - input_prompt="input_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/SmartGPT/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "cot_prompt": cot_prompt, - "reflexion_prompt": reflexion_prompt, - "dera_prompt": dera_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SmartGptPageResponse, parse_obj_as(type_=SmartGptPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - 
raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_smart_gpt( - self, - *, - input_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - cot_prompt: typing.Optional[str] = OMIT, - reflexion_prompt: typing.Optional[str] = OMIT, - dera_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - input_prompt : str + example_id : typing.Optional[str] functions : typing.Optional[typing.Sequence[RecipeFunction]] @@ -474,6 +228,8 @@ async def async_smart_gpt( sampling_temperature : typing.Optional[float] + response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] + settings : typing.Optional[RunSettings] request_options : typing.Optional[RequestOptions] @@ -481,7 +237,7 @@ async def async_smart_gpt( Returns ------- - AsyncApiResponseModelV3 + SmartGptPageResponse Successful Response Examples @@ -491,7 +247,6 @@ async def async_smart_gpt( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) @@ -505,8 +260,9 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async/", + "v3/SmartGPT/async", method="POST", + params={"example_id": example_id}, json={ "functions": functions, "variables": variables, @@ -520,6 +276,7 @@ async def main() -> None: "quality": quality, "max_tokens": max_tokens, "sampling_temperature": sampling_temperature, + 
"response_format_type": response_format_type, "settings": settings, }, request_options=request_options, @@ -527,7 +284,7 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore + return typing.cast(SmartGptPageResponse, parse_obj_as(type_=SmartGptPageResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore @@ -545,21 +302,16 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def status_smart_gpt( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SmartGptPageStatusResponse: + async def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ Parameters ---------- - run_id : str - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - SmartGptPageStatusResponse - Successful Response + None Examples -------- @@ -568,37 +320,22 @@ async def status_smart_gpt( from gooey import AsyncGooey client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", api_key="YOUR_API_KEY", ) async def main() -> None: - await client.smart_gpt.status_smart_gpt( - run_id="run_id", - ) + await client.smart_gpt.post() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SmartGPT/status/", method="GET", params={"run_id": run_id}, request_options=request_options + "v2/SmartGPT/", method="POST", request_options=request_options ) try: if 200 <= _response.status_code < 300: - return typing.cast(SmartGptPageStatusResponse, parse_obj_as(type_=SmartGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + return _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/gooey/smart_gpt/types/__init__.py b/src/gooey/smart_gpt/types/__init__.py new file mode 100644 index 0000000..3032d41 --- /dev/null +++ b/src/gooey/smart_gpt/types/__init__.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType +from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel + +__all__ = ["SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel"] diff --git a/src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py b/src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py new file mode 100644 index 0000000..1eaf901 --- /dev/null +++ b/src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SmartGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/smart_gpt_page_request_selected_model.py b/src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py similarity index 79% rename from src/gooey/types/smart_gpt_page_request_selected_model.py rename to src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py index f5868c7..9142b8f 100644 --- a/src/gooey/types/smart_gpt_page_request_selected_model.py +++ b/src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py @@ -5,6 +5,8 @@ SmartGptPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git 
a/src/gooey/speech_recognition_translation/__init__.py b/src/gooey/speech_recognition_translation/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/speech_recognition_translation/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/speech_recognition_translation/client.py b/src/gooey/speech_recognition_translation/client.py deleted file mode 100644 index 3d90099..0000000 --- a/src/gooey/speech_recognition_translation/client.py +++ /dev/null @@ -1,603 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.asr_page_request_output_format import AsrPageRequestOutputFormat -from ..types.asr_page_request_selected_model import AsrPageRequestSelectedModel -from ..types.asr_page_request_translation_model import AsrPageRequestTranslationModel -from ..types.asr_page_response import AsrPageResponse -from ..types.asr_page_status_response import AsrPageStatusResponse -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = 
typing.cast(typing.Any, ...) - - -class SpeechRecognitionTranslationClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def asr( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, - language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, - output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsrPageResponse: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[AsrPageRequestSelectedModel] - - language : typing.Optional[str] - - translation_model : typing.Optional[AsrPageRequestTranslationModel] - - output_format : typing.Optional[AsrPageRequestOutputFormat] - - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. 
Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsrPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.speech_recognition_translation.asr( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/asr/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsrPageResponse, parse_obj_as(type_=AsrPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, 
object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_asr( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, - language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, - output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[AsrPageRequestSelectedModel] - - language : typing.Optional[str] - - translation_model : typing.Optional[AsrPageRequestTranslationModel] - - output_format : typing.Optional[AsrPageRequestOutputFormat] - - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. 
Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.speech_recognition_translation.async_asr( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/asr/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, 
body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_asr( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsrPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsrPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.speech_recognition_translation.status_asr( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/asr/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsrPageStatusResponse, parse_obj_as(type_=AsrPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncSpeechRecognitionTranslationClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def asr( - self, - *, - documents: typing.Sequence[str], - functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, - language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, - output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsrPageResponse: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[AsrPageRequestSelectedModel] - - language : typing.Optional[str] - - translation_model : typing.Optional[AsrPageRequestTranslationModel] - - output_format : typing.Optional[AsrPageRequestOutputFormat] - - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsrPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.speech_recognition_translation.asr( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/asr/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsrPageResponse, parse_obj_as(type_=AsrPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) - - async def async_asr( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, - language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, - output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[AsrPageRequestSelectedModel] - - language : typing.Optional[str] - - translation_model : typing.Optional[AsrPageRequestTranslationModel] - - output_format : typing.Optional[AsrPageRequestOutputFormat] - - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
- - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.speech_recognition_translation.async_asr( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/asr/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) - - async def status_asr( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsrPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsrPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.speech_recognition_translation.status_asr( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/asr/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsrPageStatusResponse, parse_obj_as(type_=AsrPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/summarize_your_docs_with_gpt/__init__.py b/src/gooey/summarize_your_docs_with_gpt/__init__.py deleted file mode 100644 index f3ea265..0000000 --- 
a/src/gooey/summarize_your_docs_with_gpt/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/summarize_your_docs_with_gpt/client.py b/src/gooey/summarize_your_docs_with_gpt/client.py deleted file mode 100644 index 744cb9a..0000000 --- a/src/gooey/summarize_your_docs_with_gpt/client.py +++ /dev/null @@ -1,638 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel -from ..types.doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel -from ..types.doc_summary_page_response import DocSummaryPageResponse -from ..types.doc_summary_page_status_response import DocSummaryPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class SummarizeYourDocsWithGptClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def doc_summary( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - merge_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, - selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> DocSummaryPageResponse: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - chain_type : typing.Optional[typing.Literal["map_reduce"]] - - selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - 
request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocSummaryPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.summarize_your_docs_with_gpt.doc_summary( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/doc-summary/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSummaryPageResponse, parse_obj_as(type_=DocSummaryPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = 
_response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_doc_summary( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - merge_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, - selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - chain_type : typing.Optional[typing.Literal["map_reduce"]] - - selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] - - google_translate_target : 
typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.summarize_your_docs_with_gpt.async_doc_summary( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-summary/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, 
body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_doc_summary( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocSummaryPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocSummaryPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.summarize_your_docs_with_gpt.status_doc_summary( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-summary/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSummaryPageStatusResponse, parse_obj_as(type_=DocSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncSummarizeYourDocsWithGptClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def doc_summary( - self, - *, - documents: 
typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - merge_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, - selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> DocSummaryPageResponse: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - chain_type : typing.Optional[typing.Literal["map_reduce"]] - - selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DocSummaryPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.summarize_your_docs_with_gpt.doc_summary( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/doc-summary/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSummaryPageResponse, parse_obj_as(type_=DocSummaryPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - 
_response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_doc_summary( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - merge_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, - selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - chain_type : typing.Optional[typing.Literal["map_reduce"]] - - selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] - - 
google_translate_target : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.summarize_your_docs_with_gpt.async_doc_summary( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-summary/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - 
_response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_doc_summary( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocSummaryPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocSummaryPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.summarize_your_docs_with_gpt.status_doc_summary( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-summary/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSummaryPageStatusResponse, parse_obj_as(type_=DocSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) 
diff --git a/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py b/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py b/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py deleted file mode 100644 index e5bdc26..0000000 --- a/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py +++ /dev/null @@ -1,646 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel -from ..types.doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel -from ..types.doc_extract_page_response import DocExtractPageResponse -from ..types.doc_extract_page_status_response import DocExtractPageStatusResponse -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings - -# this is used as the default value for 
optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class SyntheticDataMakerForVideosPdFsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def doc_extract( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - sheet_url: typing.Optional[str] = OMIT, - selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> DocExtractPageResponse: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - sheet_url : typing.Optional[str] - - selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
- - task_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocExtractPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocExtractPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.synthetic_data_maker_for_videos_pd_fs.doc_extract( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/doc-extract/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "sheet_url": sheet_url, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "glossary_document": glossary_document, - "task_instructions": task_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocExtractPageResponse, parse_obj_as(type_=DocExtractPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if 
_response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_doc_extract( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - sheet_url: typing.Optional[str] = OMIT, - selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - sheet_url : typing.Optional[str] - - selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - glossary_document : 
typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - task_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocExtractPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-extract/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "sheet_url": sheet_url, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "glossary_document": glossary_document, - "task_instructions": task_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise 
PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_doc_extract( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocExtractPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocExtractPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-extract/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocExtractPageStatusResponse, parse_obj_as(type_=DocExtractPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: 
ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncSyntheticDataMakerForVideosPdFsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def doc_extract( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - sheet_url: typing.Optional[str] = OMIT, - selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> DocExtractPageResponse: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - sheet_url : typing.Optional[str] - - selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - 
Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - task_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocExtractPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocExtractPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.synthetic_data_maker_for_videos_pd_fs.doc_extract( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/doc-extract/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "sheet_url": sheet_url, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "glossary_document": glossary_document, - "task_instructions": task_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocExtractPageResponse, parse_obj_as(type_=DocExtractPageResponse, object_=_response.json())) # type: ignore 
- if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_doc_extract( - self, - *, - documents: typing.Sequence[str], - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - sheet_url: typing.Optional[str] = OMIT, - selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - functions : 
typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - sheet_url : typing.Optional[str] - - selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - task_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocExtractPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-extract/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "sheet_url": sheet_url, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "glossary_document": glossary_document, - "task_instructions": task_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) - - async def status_doc_extract( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocExtractPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DocExtractPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-extract/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocExtractPageStatusResponse, parse_obj_as(type_=DocExtractPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/text_guided_audio_generator/__init__.py b/src/gooey/text_guided_audio_generator/__init__.py deleted file mode 
100644 index f3ea265..0000000 --- a/src/gooey/text_guided_audio_generator/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/text_guided_audio_generator/client.py b/src/gooey/text_guided_audio_generator/client.py deleted file mode 100644 index fc01fca..0000000 --- a/src/gooey/text_guided_audio_generator/client.py +++ /dev/null @@ -1,588 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from ..types.text2audio_page_response import Text2AudioPageResponse -from ..types.text2audio_page_status_response import Text2AudioPageStatusResponse - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class TextGuidedAudioGeneratorClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def text2audio( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - duration_sec: typing.Optional[float] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> Text2AudioPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - duration_sec : typing.Optional[float] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - Text2AudioPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.text_guided_audio_generator.text2audio( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/text2audio/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "duration_sec": duration_sec, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Text2AudioPageResponse, parse_obj_as(type_=Text2AudioPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_text2audio( - self, - *, - text_prompt: str, - functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - duration_sec: typing.Optional[float] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - duration_sec : typing.Optional[float] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.text_guided_audio_generator.async_text2audio( - text_prompt="text_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/text2audio/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "duration_sec": duration_sec, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_text2audio( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> Text2AudioPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] 
- Request-specific configuration. - - Returns - ------- - Text2AudioPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.text_guided_audio_generator.status_text2audio( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/text2audio/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Text2AudioPageStatusResponse, parse_obj_as(type_=Text2AudioPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncTextGuidedAudioGeneratorClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def text2audio( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - duration_sec: typing.Optional[float] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: 
typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> Text2AudioPageResponse: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - duration_sec : typing.Optional[float] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - Text2AudioPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.text_guided_audio_generator.text2audio( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/text2audio/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "duration_sec": duration_sec, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Text2AudioPageResponse, parse_obj_as(type_=Text2AudioPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) - - async def async_text2audio( - self, - *, - text_prompt: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - duration_sec: typing.Optional[float] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - text_prompt : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - duration_sec : typing.Optional[float] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.text_guided_audio_generator.async_text2audio( - text_prompt="text_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/text2audio/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "duration_sec": duration_sec, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_text2audio( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> 
Text2AudioPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - Text2AudioPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.text_guided_audio_generator.status_text2audio( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/text2audio/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Text2AudioPageStatusResponse, parse_obj_as(type_=Text2AudioPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py index eab2baa..d49ace4 100644 --- a/src/gooey/types/__init__.py +++ b/src/gooey/types/__init__.py @@ -9,7 +9,6 @@ from .asr_output_json import AsrOutputJson from .asr_page_output import AsrPageOutput from .asr_page_output_output_text_item import 
AsrPageOutputOutputTextItem -from .asr_page_request import AsrPageRequest from .asr_page_request_output_format import AsrPageRequestOutputFormat from .asr_page_request_selected_model import AsrPageRequestSelectedModel from .asr_page_request_translation_model import AsrPageRequestTranslationModel @@ -19,12 +18,9 @@ from .balance_response import BalanceResponse from .bot_broadcast_filters import BotBroadcastFilters from .bulk_eval_page_output import BulkEvalPageOutput -from .bulk_eval_page_request import BulkEvalPageRequest -from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel from .bulk_eval_page_response import BulkEvalPageResponse from .bulk_eval_page_status_response import BulkEvalPageStatusResponse from .bulk_runner_page_output import BulkRunnerPageOutput -from .bulk_runner_page_request import BulkRunnerPageRequest from .bulk_runner_page_response import BulkRunnerPageResponse from .bulk_runner_page_status_response import BulkRunnerPageStatusResponse from .button_pressed import ButtonPressed @@ -37,19 +33,16 @@ from .chyron_plant_page_response import ChyronPlantPageResponse from .chyron_plant_page_status_response import ChyronPlantPageStatusResponse from .compare_llm_page_output import CompareLlmPageOutput -from .compare_llm_page_request import CompareLlmPageRequest from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem from .compare_llm_page_response import CompareLlmPageResponse from .compare_llm_page_status_response import CompareLlmPageStatusResponse from .compare_text2img_page_output import CompareText2ImgPageOutput -from .compare_text2img_page_request import CompareText2ImgPageRequest from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem from 
.compare_text2img_page_response import CompareText2ImgPageResponse from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse from .compare_upscaler_page_output import CompareUpscalerPageOutput -from .compare_upscaler_page_request import CompareUpscalerPageRequest from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem from .compare_upscaler_page_response import CompareUpscalerPageResponse from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse @@ -66,43 +59,39 @@ from .conversation_start import ConversationStart from .create_stream_response import CreateStreamResponse from .deforum_sd_page_output import DeforumSdPageOutput -from .deforum_sd_page_request import DeforumSdPageRequest from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel from .deforum_sd_page_response import DeforumSdPageResponse from .deforum_sd_page_status_response import DeforumSdPageStatusResponse from .doc_extract_page_output import DocExtractPageOutput -from .doc_extract_page_request import DocExtractPageRequest +from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel from .doc_extract_page_response import DocExtractPageResponse from .doc_extract_page_status_response import DocExtractPageStatusResponse from .doc_search_page_output import DocSearchPageOutput -from .doc_search_page_request import DocSearchPageRequest from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery +from .doc_search_page_request_response_format_type import 
DocSearchPageRequestResponseFormatType from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel from .doc_search_page_response import DocSearchPageResponse from .doc_search_page_status_response import DocSearchPageStatusResponse from .doc_summary_page_output import DocSummaryPageOutput -from .doc_summary_page_request import DocSummaryPageRequest +from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel from .doc_summary_page_response import DocSummaryPageResponse from .doc_summary_page_status_response import DocSummaryPageStatusResponse from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput -from .email_face_inpainting_page_request import EmailFaceInpaintingPageRequest from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel from .email_face_inpainting_page_response import EmailFaceInpaintingPageResponse from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse from .embeddings_page_output import EmbeddingsPageOutput -from .embeddings_page_request import EmbeddingsPageRequest from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel from .embeddings_page_response import EmbeddingsPageResponse from .embeddings_page_status_response import EmbeddingsPageStatusResponse from .eval_prompt import EvalPrompt from .face_inpainting_page_output import FaceInpaintingPageOutput -from .face_inpainting_page_request import FaceInpaintingPageRequest from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel from .face_inpainting_page_response import FaceInpaintingPageResponse from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse @@ 
-110,32 +99,28 @@ from .failed_response_detail import FailedResponseDetail from .final_response import FinalResponse from .functions_page_output import FunctionsPageOutput -from .functions_page_request import FunctionsPageRequest from .functions_page_response import FunctionsPageResponse from .functions_page_status_response import FunctionsPageStatusResponse from .generic_error_response import GenericErrorResponse from .generic_error_response_detail import GenericErrorResponseDetail from .google_gpt_page_output import GoogleGptPageOutput -from .google_gpt_page_request import GoogleGptPageRequest from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel +from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel from .google_gpt_page_response import GoogleGptPageResponse from .google_gpt_page_status_response import GoogleGptPageStatusResponse from .google_image_gen_page_output import GoogleImageGenPageOutput -from .google_image_gen_page_request import GoogleImageGenPageRequest from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel from .google_image_gen_page_response import GoogleImageGenPageResponse from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse from .http_validation_error import HttpValidationError from .image_segmentation_page_output import ImageSegmentationPageOutput -from .image_segmentation_page_request import ImageSegmentationPageRequest from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel from .image_segmentation_page_response import ImageSegmentationPageResponse from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse from .image_url import ImageUrl from .image_url_detail import ImageUrlDetail from .img2img_page_output import Img2ImgPageOutput -from 
.img2img_page_request import Img2ImgPageRequest from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel @@ -146,12 +131,9 @@ from .letter_writer_page_response import LetterWriterPageResponse from .letter_writer_page_status_response import LetterWriterPageStatusResponse from .lipsync_page_output import LipsyncPageOutput -from .lipsync_page_request import LipsyncPageRequest -from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel from .lipsync_page_response import LipsyncPageResponse from .lipsync_page_status_response import LipsyncPageStatusResponse from .lipsync_tts_page_output import LipsyncTtsPageOutput -from .lipsync_tts_page_request import LipsyncTtsPageRequest from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel @@ -161,14 +143,12 @@ from .llm_tools import LlmTools from .message_part import MessagePart from .object_inpainting_page_output import ObjectInpaintingPageOutput -from .object_inpainting_page_request import ObjectInpaintingPageRequest from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel from .object_inpainting_page_response import ObjectInpaintingPageResponse from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse from .prompt_tree_node import PromptTreeNode from .prompt_tree_node_prompt import PromptTreeNodePrompt from .qr_code_generator_page_output import QrCodeGeneratorPageOutput -from .qr_code_generator_page_request import QrCodeGeneratorPageRequest from 
.qr_code_generator_page_request_image_prompt_controlnet_models_item import ( QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, ) @@ -185,16 +165,16 @@ from .related_doc_search_response import RelatedDocSearchResponse from .related_google_gpt_response import RelatedGoogleGptResponse from .related_qn_a_doc_page_output import RelatedQnADocPageOutput -from .related_qn_a_doc_page_request import RelatedQnADocPageRequest from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery +from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel from .related_qn_a_doc_page_response import RelatedQnADocPageResponse from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse from .related_qn_a_page_output import RelatedQnAPageOutput -from .related_qn_a_page_request import RelatedQnAPageRequest from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel +from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel from .related_qn_a_page_response import RelatedQnAPageResponse from .related_qn_a_page_status_response import RelatedQnAPageStatusResponse @@ -209,29 +189,25 @@ from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess from .search_reference import SearchReference from .seo_summary_page_output import SeoSummaryPageOutput -from .seo_summary_page_request import SeoSummaryPageRequest +from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType from 
.seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel from .seo_summary_page_response import SeoSummaryPageResponse from .seo_summary_page_status_response import SeoSummaryPageStatusResponse from .serp_search_location import SerpSearchLocation from .serp_search_type import SerpSearchType from .smart_gpt_page_output import SmartGptPageOutput -from .smart_gpt_page_request import SmartGptPageRequest -from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel from .smart_gpt_page_response import SmartGptPageResponse from .smart_gpt_page_status_response import SmartGptPageStatusResponse from .social_lookup_email_page_output import SocialLookupEmailPageOutput -from .social_lookup_email_page_request import SocialLookupEmailPageRequest +from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel from .social_lookup_email_page_response import SocialLookupEmailPageResponse from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse from .stream_error import StreamError from .text2audio_page_output import Text2AudioPageOutput -from .text2audio_page_request import Text2AudioPageRequest from .text2audio_page_response import Text2AudioPageResponse from .text2audio_page_status_response import Text2AudioPageStatusResponse from .text_to_speech_page_output import TextToSpeechPageOutput -from .text_to_speech_page_request import TextToSpeechPageRequest from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider @@ -239,7 +215,6 @@ from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse from 
.training_data_model import TrainingDataModel from .translation_page_output import TranslationPageOutput -from .translation_page_request import TranslationPageRequest from .translation_page_request_selected_model import TranslationPageRequestSelectedModel from .translation_page_response import TranslationPageResponse from .translation_page_status_response import TranslationPageStatusResponse @@ -249,16 +224,6 @@ from .video_bots_page_output import VideoBotsPageOutput from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt -from .video_bots_page_request import VideoBotsPageRequest -from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel -from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle -from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel -from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel -from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel -from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName -from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel -from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel -from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider from .video_bots_page_response import VideoBotsPageResponse from .video_bots_page_status_response import VideoBotsPageStatusResponse @@ -272,7 +237,6 @@ "AsrOutputJson", "AsrPageOutput", "AsrPageOutputOutputTextItem", - "AsrPageRequest", "AsrPageRequestOutputFormat", "AsrPageRequestSelectedModel", "AsrPageRequestTranslationModel", @@ -282,12 +246,9 @@ "BalanceResponse", "BotBroadcastFilters", "BulkEvalPageOutput", - "BulkEvalPageRequest", - "BulkEvalPageRequestSelectedModel", "BulkEvalPageResponse", 
"BulkEvalPageStatusResponse", "BulkRunnerPageOutput", - "BulkRunnerPageRequest", "BulkRunnerPageResponse", "BulkRunnerPageStatusResponse", "ButtonPressed", @@ -300,19 +261,16 @@ "ChyronPlantPageResponse", "ChyronPlantPageStatusResponse", "CompareLlmPageOutput", - "CompareLlmPageRequest", "CompareLlmPageRequestResponseFormatType", "CompareLlmPageRequestSelectedModelsItem", "CompareLlmPageResponse", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", - "CompareText2ImgPageRequest", "CompareText2ImgPageRequestScheduler", "CompareText2ImgPageRequestSelectedModelsItem", "CompareText2ImgPageResponse", "CompareText2ImgPageStatusResponse", "CompareUpscalerPageOutput", - "CompareUpscalerPageRequest", "CompareUpscalerPageRequestSelectedModelsItem", "CompareUpscalerPageResponse", "CompareUpscalerPageStatusResponse", @@ -327,43 +285,39 @@ "ConversationStart", "CreateStreamResponse", "DeforumSdPageOutput", - "DeforumSdPageRequest", "DeforumSdPageRequestSelectedModel", "DeforumSdPageResponse", "DeforumSdPageStatusResponse", "DocExtractPageOutput", - "DocExtractPageRequest", + "DocExtractPageRequestResponseFormatType", "DocExtractPageRequestSelectedAsrModel", "DocExtractPageRequestSelectedModel", "DocExtractPageResponse", "DocExtractPageStatusResponse", "DocSearchPageOutput", - "DocSearchPageRequest", "DocSearchPageRequestCitationStyle", "DocSearchPageRequestEmbeddingModel", "DocSearchPageRequestKeywordQuery", + "DocSearchPageRequestResponseFormatType", "DocSearchPageRequestSelectedModel", "DocSearchPageResponse", "DocSearchPageStatusResponse", "DocSummaryPageOutput", - "DocSummaryPageRequest", + "DocSummaryPageRequestResponseFormatType", "DocSummaryPageRequestSelectedAsrModel", "DocSummaryPageRequestSelectedModel", "DocSummaryPageResponse", "DocSummaryPageStatusResponse", "EmailFaceInpaintingPageOutput", - "EmailFaceInpaintingPageRequest", "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageResponse", "EmailFaceInpaintingPageStatusResponse", 
"EmbeddingsPageOutput", - "EmbeddingsPageRequest", "EmbeddingsPageRequestSelectedModel", "EmbeddingsPageResponse", "EmbeddingsPageStatusResponse", "EvalPrompt", "FaceInpaintingPageOutput", - "FaceInpaintingPageRequest", "FaceInpaintingPageRequestSelectedModel", "FaceInpaintingPageResponse", "FaceInpaintingPageStatusResponse", @@ -371,32 +325,28 @@ "FailedResponseDetail", "FinalResponse", "FunctionsPageOutput", - "FunctionsPageRequest", "FunctionsPageResponse", "FunctionsPageStatusResponse", "GenericErrorResponse", "GenericErrorResponseDetail", "GoogleGptPageOutput", - "GoogleGptPageRequest", "GoogleGptPageRequestEmbeddingModel", + "GoogleGptPageRequestResponseFormatType", "GoogleGptPageRequestSelectedModel", "GoogleGptPageResponse", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", - "GoogleImageGenPageRequest", "GoogleImageGenPageRequestSelectedModel", "GoogleImageGenPageResponse", "GoogleImageGenPageStatusResponse", "HttpValidationError", "ImageSegmentationPageOutput", - "ImageSegmentationPageRequest", "ImageSegmentationPageRequestSelectedModel", "ImageSegmentationPageResponse", "ImageSegmentationPageStatusResponse", "ImageUrl", "ImageUrlDetail", "Img2ImgPageOutput", - "Img2ImgPageRequest", "Img2ImgPageRequestSelectedControlnetModel", "Img2ImgPageRequestSelectedControlnetModelItem", "Img2ImgPageRequestSelectedModel", @@ -407,12 +357,9 @@ "LetterWriterPageResponse", "LetterWriterPageStatusResponse", "LipsyncPageOutput", - "LipsyncPageRequest", - "LipsyncPageRequestSelectedModel", "LipsyncPageResponse", "LipsyncPageStatusResponse", "LipsyncTtsPageOutput", - "LipsyncTtsPageRequest", "LipsyncTtsPageRequestOpenaiTtsModel", "LipsyncTtsPageRequestOpenaiVoiceName", "LipsyncTtsPageRequestSelectedModel", @@ -422,14 +369,12 @@ "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", - "ObjectInpaintingPageRequest", "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageResponse", "ObjectInpaintingPageStatusResponse", "PromptTreeNode", 
"PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", - "QrCodeGeneratorPageRequest", "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem", "QrCodeGeneratorPageRequestScheduler", "QrCodeGeneratorPageRequestSelectedControlnetModelItem", @@ -442,16 +387,16 @@ "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", - "RelatedQnADocPageRequest", "RelatedQnADocPageRequestCitationStyle", "RelatedQnADocPageRequestEmbeddingModel", "RelatedQnADocPageRequestKeywordQuery", + "RelatedQnADocPageRequestResponseFormatType", "RelatedQnADocPageRequestSelectedModel", "RelatedQnADocPageResponse", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", - "RelatedQnAPageRequest", "RelatedQnAPageRequestEmbeddingModel", + "RelatedQnAPageRequestResponseFormatType", "RelatedQnAPageRequestSelectedModel", "RelatedQnAPageResponse", "RelatedQnAPageStatusResponse", @@ -466,29 +411,25 @@ "SadTalkerSettingsPreprocess", "SearchReference", "SeoSummaryPageOutput", - "SeoSummaryPageRequest", + "SeoSummaryPageRequestResponseFormatType", "SeoSummaryPageRequestSelectedModel", "SeoSummaryPageResponse", "SeoSummaryPageStatusResponse", "SerpSearchLocation", "SerpSearchType", "SmartGptPageOutput", - "SmartGptPageRequest", - "SmartGptPageRequestSelectedModel", "SmartGptPageResponse", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", - "SocialLookupEmailPageRequest", + "SocialLookupEmailPageRequestResponseFormatType", "SocialLookupEmailPageRequestSelectedModel", "SocialLookupEmailPageResponse", "SocialLookupEmailPageStatusResponse", "StreamError", "Text2AudioPageOutput", - "Text2AudioPageRequest", "Text2AudioPageResponse", "Text2AudioPageStatusResponse", "TextToSpeechPageOutput", - "TextToSpeechPageRequest", "TextToSpeechPageRequestOpenaiTtsModel", "TextToSpeechPageRequestOpenaiVoiceName", "TextToSpeechPageRequestTtsProvider", @@ -496,7 +437,6 @@ "TextToSpeechPageStatusResponse", "TrainingDataModel", "TranslationPageOutput", - "TranslationPageRequest", 
"TranslationPageRequestSelectedModel", "TranslationPageResponse", "TranslationPageStatusResponse", @@ -506,16 +446,6 @@ "VideoBotsPageOutput", "VideoBotsPageOutputFinalKeywordQuery", "VideoBotsPageOutputFinalPrompt", - "VideoBotsPageRequest", - "VideoBotsPageRequestAsrModel", - "VideoBotsPageRequestCitationStyle", - "VideoBotsPageRequestEmbeddingModel", - "VideoBotsPageRequestLipsyncModel", - "VideoBotsPageRequestOpenaiTtsModel", - "VideoBotsPageRequestOpenaiVoiceName", - "VideoBotsPageRequestSelectedModel", - "VideoBotsPageRequestTranslationModel", - "VideoBotsPageRequestTtsProvider", "VideoBotsPageResponse", "VideoBotsPageStatusResponse", ] diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py deleted file mode 100644 index 228b6ff..0000000 --- a/src/gooey/types/asr_page_request.py +++ /dev/null @@ -1,49 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .asr_page_request_output_format import AsrPageRequestOutputFormat -from .asr_page_request_selected_model import AsrPageRequestSelectedModel -from .asr_page_request_translation_model import AsrPageRequestTranslationModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class AsrPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - documents: typing.List[str] - selected_model: typing.Optional[AsrPageRequestSelectedModel] = None - language: typing.Optional[str] = None - translation_model: typing.Optional[AsrPageRequestTranslationModel] = None - output_format: typing.Optional[AsrPageRequestOutputFormat] = None - google_translate_target: typing.Optional[str] = 
pydantic.Field(default=None) - """ - use `translation_model` & `translation_target` instead. - """ - - translation_source: typing.Optional[str] = None - translation_target: typing.Optional[str] = None - glossary_document: typing.Optional[str] = pydantic.Field(default=None) - """ - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/asr_page_request_selected_model.py b/src/gooey/types/asr_page_request_selected_model.py index 5180332..4e80d3c 100644 --- a/src/gooey/types/asr_page_request_selected_model.py +++ b/src/gooey/types/asr_page_request_selected_model.py @@ -15,8 +15,9 @@ "usm", "deepgram", "azure", - "seamless_m4t", + "seamless_m4t_v2", "mms_1b_all", + "seamless_m4t", ], typing.Any, ] diff --git a/src/gooey/types/asr_page_status_response.py b/src/gooey/types/asr_page_status_response.py index 81ec63b..c4e8f2b 100644 --- a/src/gooey/types/asr_page_status_response.py +++ b/src/gooey/types/asr_page_status_response.py @@ -25,7 +25,7 @@ class AsrPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/bulk_eval_page_request.py b/src/gooey/types/bulk_eval_page_request.py deleted file mode 100644 index ffa6580..0000000 --- a/src/gooey/types/bulk_eval_page_request.py +++ /dev/null @@ -1,55 +0,0 @@ -# This file was auto-generated by Fern from 
our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .agg_function import AggFunction -from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel -from .eval_prompt import EvalPrompt -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class BulkEvalPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - documents: typing.List[str] = pydantic.Field() - """ - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - """ - - eval_prompts: typing.Optional[typing.List[EvalPrompt]] = pydantic.Field(default=None) - """ - Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. - _The `columns` dictionary can be used to reference the spreadsheet columns._ - """ - - agg_functions: typing.Optional[typing.List[AggFunction]] = pydantic.Field(default=None) - """ - Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). 
- """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/bulk_eval_page_status_response.py b/src/gooey/types/bulk_eval_page_status_response.py index 4a171e5..e788c4a 100644 --- a/src/gooey/types/bulk_eval_page_status_response.py +++ b/src/gooey/types/bulk_eval_page_status_response.py @@ -25,7 +25,7 @@ class BulkEvalPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/bulk_runner_page_request.py b/src/gooey/types/bulk_runner_page_request.py deleted file mode 100644 index d785c72..0000000 --- a/src/gooey/types/bulk_runner_page_request.py +++ /dev/null @@ -1,56 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class BulkRunnerPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - documents: typing.List[str] = pydantic.Field() - """ - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. 
- """ - - run_urls: typing.List[str] = pydantic.Field() - """ - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - """ - - input_columns: typing.Dict[str, str] = pydantic.Field() - """ - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - """ - - output_columns: typing.Dict[str, str] = pydantic.Field() - """ - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - """ - - eval_urls: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/bulk_runner_page_status_response.py b/src/gooey/types/bulk_runner_page_status_response.py index a4f3951..dfb46e0 100644 --- a/src/gooey/types/bulk_runner_page_status_response.py +++ b/src/gooey/types/bulk_runner_page_status_response.py @@ -25,7 +25,7 @@ class BulkRunnerPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/chyron_plant_page_status_response.py b/src/gooey/types/chyron_plant_page_status_response.py index 534d19b..c699269 100644 --- a/src/gooey/types/chyron_plant_page_status_response.py +++ b/src/gooey/types/chyron_plant_page_status_response.py @@ -25,7 +25,7 @@ class ChyronPlantPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO 
format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/compare_llm_page_request.py b/src/gooey/types/compare_llm_page_request.py deleted file mode 100644 index 87ae925..0000000 --- a/src/gooey/types/compare_llm_page_request.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType -from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class CompareLlmPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_prompt: typing.Optional[str] = None - selected_models: typing.Optional[typing.List[CompareLlmPageRequestSelectedModelsItem]] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/compare_llm_page_request_selected_models_item.py 
b/src/gooey/types/compare_llm_page_request_selected_models_item.py index 14654d5..d3564b6 100644 --- a/src/gooey/types/compare_llm_page_request_selected_models_item.py +++ b/src/gooey/types/compare_llm_page_request_selected_models_item.py @@ -5,6 +5,8 @@ CompareLlmPageRequestSelectedModelsItem = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git a/src/gooey/types/compare_llm_page_status_response.py b/src/gooey/types/compare_llm_page_status_response.py index 878d5ac..b7d0d23 100644 --- a/src/gooey/types/compare_llm_page_status_response.py +++ b/src/gooey/types/compare_llm_page_status_response.py @@ -25,7 +25,7 @@ class CompareLlmPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/compare_text2img_page_request.py b/src/gooey/types/compare_text2img_page_request.py deleted file mode 100644 index fbfeb11..0000000 --- a/src/gooey/types/compare_text2img_page_request.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler -from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class CompareText2ImgPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - text_prompt: str - negative_prompt: typing.Optional[str] = None - output_width: typing.Optional[int] = None - output_height: typing.Optional[int] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - dall_e3quality: typing.Optional[str] = pydantic.Field(alias="dall_e_3_quality", default=None) - dall_e3style: typing.Optional[str] = pydantic.Field(alias="dall_e_3_style", default=None) - guidance_scale: typing.Optional[float] = None - seed: typing.Optional[int] = None - sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) - selected_models: typing.Optional[typing.List[CompareText2ImgPageRequestSelectedModelsItem]] = None - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = None - edit_instruction: typing.Optional[str] = None - image_guidance_scale: typing.Optional[float] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/compare_text2img_page_status_response.py 
b/src/gooey/types/compare_text2img_page_status_response.py index 194c934..73c070b 100644 --- a/src/gooey/types/compare_text2img_page_status_response.py +++ b/src/gooey/types/compare_text2img_page_status_response.py @@ -25,7 +25,7 @@ class CompareText2ImgPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/compare_upscaler_page_request.py b/src/gooey/types/compare_upscaler_page_request.py deleted file mode 100644 index 00411a5..0000000 --- a/src/gooey/types/compare_upscaler_page_request.py +++ /dev/null @@ -1,46 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class CompareUpscalerPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_image: typing.Optional[str] = pydantic.Field(default=None) - """ - Input Image - """ - - input_video: typing.Optional[str] = pydantic.Field(default=None) - """ - Input Video - """ - - scale: int = pydantic.Field() - """ - The final upsampling scale of the image - """ - - selected_models: typing.Optional[typing.List[CompareUpscalerPageRequestSelectedModelsItem]] = None - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", 
frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/compare_upscaler_page_status_response.py b/src/gooey/types/compare_upscaler_page_status_response.py index e6c94a1..179e2c5 100644 --- a/src/gooey/types/compare_upscaler_page_status_response.py +++ b/src/gooey/types/compare_upscaler_page_status_response.py @@ -25,7 +25,7 @@ class CompareUpscalerPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/deforum_sd_page_request.py b/src/gooey/types/deforum_sd_page_request.py deleted file mode 100644 index 79f6d06..0000000 --- a/src/gooey/types/deforum_sd_page_request.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .animation_prompt import AnimationPrompt -from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class DeforumSdPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - animation_prompts: typing.List[AnimationPrompt] - max_frames: typing.Optional[int] = None - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = None - animation_mode: typing.Optional[str] = None - zoom: typing.Optional[str] = None - translation_x: typing.Optional[str] = None - translation_y: typing.Optional[str] = None - rotation3d_x: typing.Optional[str] = 
pydantic.Field(alias="rotation_3d_x", default=None) - rotation3d_y: typing.Optional[str] = pydantic.Field(alias="rotation_3d_y", default=None) - rotation3d_z: typing.Optional[str] = pydantic.Field(alias="rotation_3d_z", default=None) - fps: typing.Optional[int] = None - seed: typing.Optional[int] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/deforum_sd_page_status_response.py b/src/gooey/types/deforum_sd_page_status_response.py index bfc9685..9376f4f 100644 --- a/src/gooey/types/deforum_sd_page_status_response.py +++ b/src/gooey/types/deforum_sd_page_status_response.py @@ -25,7 +25,7 @@ class DeforumSdPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/doc_extract_page_output.py b/src/gooey/types/doc_extract_page_output.py index c11791c..afc0077 100644 --- a/src/gooey/types/doc_extract_page_output.py +++ b/src/gooey/types/doc_extract_page_output.py @@ -9,6 +9,7 @@ class DocExtractPageOutput(UniversalBaseModel): + output_documents: typing.Optional[typing.List[str]] = None called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py deleted file mode 100644 index 749620a..0000000 --- a/src/gooey/types/doc_extract_page_request.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel -from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class DocExtractPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - documents: typing.List[str] - sheet_url: typing.Optional[str] = None - selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = None - google_translate_target: typing.Optional[str] = None - glossary_document: typing.Optional[str] = pydantic.Field(default=None) - """ - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
- """ - - task_instructions: typing.Optional[str] = None - selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_extract_page_request_response_format_type.py b/src/gooey/types/doc_extract_page_request_response_format_type.py new file mode 100644 index 0000000..0ad7c14 --- /dev/null +++ b/src/gooey/types/doc_extract_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocExtractPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_extract_page_request_selected_asr_model.py b/src/gooey/types/doc_extract_page_request_selected_asr_model.py index 1640e23..a358400 100644 --- a/src/gooey/types/doc_extract_page_request_selected_asr_model.py +++ b/src/gooey/types/doc_extract_page_request_selected_asr_model.py @@ -15,8 +15,9 @@ "usm", "deepgram", "azure", - "seamless_m4t", + "seamless_m4t_v2", "mms_1b_all", + "seamless_m4t", ], typing.Any, ] diff --git a/src/gooey/types/doc_extract_page_request_selected_model.py b/src/gooey/types/doc_extract_page_request_selected_model.py index 32fc17b..1872929 100644 --- a/src/gooey/types/doc_extract_page_request_selected_model.py +++ b/src/gooey/types/doc_extract_page_request_selected_model.py @@ -5,6 +5,8 @@ DocExtractPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git a/src/gooey/types/doc_extract_page_status_response.py b/src/gooey/types/doc_extract_page_status_response.py index 99e2ade..409989f 100644 --- a/src/gooey/types/doc_extract_page_status_response.py +++ b/src/gooey/types/doc_extract_page_status_response.py @@ -25,7 +25,7 @@ class DocExtractPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: 
float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/doc_search_page_request.py b/src/gooey/types/doc_search_page_request.py deleted file mode 100644 index e6705b8..0000000 --- a/src/gooey/types/doc_search_page_request.py +++ /dev/null @@ -1,55 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle -from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel -from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery -from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class DocSearchPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - search_query: str - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = None - documents: typing.Optional[typing.List[str]] = None - max_references: typing.Optional[int] = None - max_context_words: typing.Optional[int] = None - scroll_jump: typing.Optional[int] = None - doc_extract_url: typing.Optional[str] = None - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = None - dense_weight: typing.Optional[float] = pydantic.Field(default=None) - """ - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- """ - - task_instructions: typing.Optional[str] = None - query_instructions: typing.Optional[str] = None - selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_search_page_request_response_format_type.py b/src/gooey/types/doc_search_page_request_response_format_type.py new file mode 100644 index 0000000..856b641 --- /dev/null +++ b/src/gooey/types/doc_search_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocSearchPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_search_page_request_selected_model.py b/src/gooey/types/doc_search_page_request_selected_model.py index 0c88fb5..3b793b6 100644 --- a/src/gooey/types/doc_search_page_request_selected_model.py +++ b/src/gooey/types/doc_search_page_request_selected_model.py @@ -5,6 +5,8 @@ DocSearchPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git a/src/gooey/types/doc_search_page_status_response.py b/src/gooey/types/doc_search_page_status_response.py index 9341296..dcbb56a 100644 --- a/src/gooey/types/doc_search_page_status_response.py +++ b/src/gooey/types/doc_search_page_status_response.py @@ -25,7 +25,7 @@ class DocSearchPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py deleted file mode 100644 index 2b54fde..0000000 --- a/src/gooey/types/doc_summary_page_request.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel -from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class DocSummaryPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - documents: typing.List[str] - task_instructions: typing.Optional[str] = None - merge_instructions: typing.Optional[str] = None - selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - chain_type: typing.Optional[typing.Literal["map_reduce"]] = None - selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = None - google_translate_target: typing.Optional[str] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_summary_page_request_response_format_type.py b/src/gooey/types/doc_summary_page_request_response_format_type.py new file mode 100644 index 0000000..318ad7f --- /dev/null +++ b/src/gooey/types/doc_summary_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_summary_page_request_selected_asr_model.py b/src/gooey/types/doc_summary_page_request_selected_asr_model.py index d189aa1..c04cc7a 100644 --- a/src/gooey/types/doc_summary_page_request_selected_asr_model.py +++ b/src/gooey/types/doc_summary_page_request_selected_asr_model.py @@ -15,8 +15,9 @@ "usm", "deepgram", "azure", - "seamless_m4t", + "seamless_m4t_v2", "mms_1b_all", + "seamless_m4t", ], typing.Any, ] diff --git a/src/gooey/types/doc_summary_page_request_selected_model.py b/src/gooey/types/doc_summary_page_request_selected_model.py index 55e97da..6da70f6 100644 --- a/src/gooey/types/doc_summary_page_request_selected_model.py +++ b/src/gooey/types/doc_summary_page_request_selected_model.py @@ -5,6 +5,8 @@ DocSummaryPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git a/src/gooey/types/doc_summary_page_status_response.py b/src/gooey/types/doc_summary_page_status_response.py index 3a4b5aa..7899397 100644 --- a/src/gooey/types/doc_summary_page_status_response.py +++ b/src/gooey/types/doc_summary_page_status_response.py @@ -25,7 +25,7 @@ class DocSummaryPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: 
float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/email_face_inpainting_page_request.py b/src/gooey/types/email_face_inpainting_page_request.py deleted file mode 100644 index 07f4660..0000000 --- a/src/gooey/types/email_face_inpainting_page_request.py +++ /dev/null @@ -1,52 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class EmailFaceInpaintingPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - email_address: typing.Optional[str] = None - twitter_handle: typing.Optional[str] = None - text_prompt: str - face_scale: typing.Optional[float] = None - face_pos_x: typing.Optional[float] = None - face_pos_y: typing.Optional[float] = None - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = None - negative_prompt: typing.Optional[str] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - upscale_factor: typing.Optional[float] = None - output_width: typing.Optional[int] = None - output_height: typing.Optional[int] = None - guidance_scale: typing.Optional[float] = None - should_send_email: typing.Optional[bool] = None - email_from: typing.Optional[str] = None - email_cc: typing.Optional[str] = None - email_bcc: typing.Optional[str] = None - email_subject: typing.Optional[str] = None - email_body: typing.Optional[str] = None - email_body_enable_html: typing.Optional[bool] = None - fallback_email_body: 
typing.Optional[str] = None - seed: typing.Optional[int] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/email_face_inpainting_page_status_response.py b/src/gooey/types/email_face_inpainting_page_status_response.py index d526f9f..97e62e0 100644 --- a/src/gooey/types/email_face_inpainting_page_status_response.py +++ b/src/gooey/types/email_face_inpainting_page_status_response.py @@ -25,7 +25,7 @@ class EmailFaceInpaintingPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/embeddings_page_request.py b/src/gooey/types/embeddings_page_request.py deleted file mode 100644 index 9e67171..0000000 --- a/src/gooey/types/embeddings_page_request.py +++ /dev/null @@ -1,31 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class EmbeddingsPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - texts: typing.List[str] - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/embeddings_page_status_response.py b/src/gooey/types/embeddings_page_status_response.py index 927d881..ff72003 100644 --- a/src/gooey/types/embeddings_page_status_response.py +++ b/src/gooey/types/embeddings_page_status_response.py @@ -25,7 +25,7 @@ class EmbeddingsPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/face_inpainting_page_request.py b/src/gooey/types/face_inpainting_page_request.py deleted file mode 100644 index 868b53b..0000000 --- a/src/gooey/types/face_inpainting_page_request.py +++ /dev/null @@ -1,43 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class FaceInpaintingPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_image: str - text_prompt: str - face_scale: typing.Optional[float] = None - face_pos_x: typing.Optional[float] = None - face_pos_y: typing.Optional[float] = None - selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = None - negative_prompt: typing.Optional[str] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - upscale_factor: typing.Optional[float] = None - output_width: typing.Optional[int] = None - output_height: typing.Optional[int] = None - guidance_scale: typing.Optional[float] = None - seed: typing.Optional[int] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/face_inpainting_page_status_response.py b/src/gooey/types/face_inpainting_page_status_response.py index 2d02e9f..42c0b2e 100644 --- a/src/gooey/types/face_inpainting_page_status_response.py +++ b/src/gooey/types/face_inpainting_page_status_response.py @@ -25,7 +25,7 @@ class FaceInpaintingPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run 
time in seconds """ diff --git a/src/gooey/types/final_response.py b/src/gooey/types/final_response.py index 64a56e4..3987c27 100644 --- a/src/gooey/types/final_response.py +++ b/src/gooey/types/final_response.py @@ -25,7 +25,7 @@ class FinalResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/functions_page_request.py b/src/gooey/types/functions_page_request.py deleted file mode 100644 index 30406dd..0000000 --- a/src/gooey/types/functions_page_request.py +++ /dev/null @@ -1,31 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .run_settings import RunSettings - - -class FunctionsPageRequest(UniversalBaseModel): - code: typing.Optional[str] = pydantic.Field(default=None) - """ - The JS code to be executed. 
- """ - - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used in the code - """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/functions_page_status_response.py b/src/gooey/types/functions_page_status_response.py index a4cee87..0f5d4c2 100644 --- a/src/gooey/types/functions_page_status_response.py +++ b/src/gooey/types/functions_page_status_response.py @@ -25,7 +25,7 @@ class FunctionsPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/google_gpt_page_request.py b/src/gooey/types/google_gpt_page_request.py deleted file mode 100644 index babbb8d..0000000 --- a/src/gooey/types/google_gpt_page_request.py +++ /dev/null @@ -1,65 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel -from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings -from .serp_search_location import SerpSearchLocation -from .serp_search_type import SerpSearchType - - -class GoogleGptPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - serp_search_location: typing.Optional[SerpSearchLocation] = None - scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_location` instead - """ - - serp_search_type: typing.Optional[SerpSearchType] = None - scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_type` instead - """ - - search_query: str - site_filter: str - task_instructions: typing.Optional[str] = None - query_instructions: typing.Optional[str] = None - selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - max_search_urls: typing.Optional[int] = None - max_references: typing.Optional[int] = None - max_context_words: typing.Optional[int] = None - scroll_jump: typing.Optional[int] = None - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = None - dense_weight: typing.Optional[float] = pydantic.Field(default=None) - """ - Weightage for dense vs 
sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/google_gpt_page_request_response_format_type.py b/src/gooey/types/google_gpt_page_request_response_format_type.py new file mode 100644 index 0000000..dd04dec --- /dev/null +++ b/src/gooey/types/google_gpt_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +GoogleGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/google_gpt_page_request_selected_model.py b/src/gooey/types/google_gpt_page_request_selected_model.py index 8d72870..719ae61 100644 --- a/src/gooey/types/google_gpt_page_request_selected_model.py +++ b/src/gooey/types/google_gpt_page_request_selected_model.py @@ -5,6 +5,8 @@ GoogleGptPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git 
a/src/gooey/types/google_gpt_page_status_response.py b/src/gooey/types/google_gpt_page_status_response.py index 2445778..43ea5a7 100644 --- a/src/gooey/types/google_gpt_page_status_response.py +++ b/src/gooey/types/google_gpt_page_status_response.py @@ -25,7 +25,7 @@ class GoogleGptPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/google_image_gen_page_request.py b/src/gooey/types/google_image_gen_page_request.py deleted file mode 100644 index 8e1360b..0000000 --- a/src/gooey/types/google_image_gen_page_request.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings -from .serp_search_location import SerpSearchLocation - - -class GoogleImageGenPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - serp_search_location: typing.Optional[SerpSearchLocation] = None - scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_location` instead - """ - - search_query: str - text_prompt: str - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = None - negative_prompt: typing.Optional[str] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - guidance_scale: typing.Optional[float] = None - prompt_strength: 
typing.Optional[float] = None - sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) - seed: typing.Optional[int] = None - image_guidance_scale: typing.Optional[float] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/google_image_gen_page_status_response.py b/src/gooey/types/google_image_gen_page_status_response.py index d8b64e0..9aac44f 100644 --- a/src/gooey/types/google_image_gen_page_status_response.py +++ b/src/gooey/types/google_image_gen_page_status_response.py @@ -25,7 +25,7 @@ class GoogleImageGenPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/image_segmentation_page_request.py b/src/gooey/types/image_segmentation_page_request.py deleted file mode 100644 index 9f2bc39..0000000 --- a/src/gooey/types/image_segmentation_page_request.py +++ /dev/null @@ -1,37 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class ImageSegmentationPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_image: str - selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = None - mask_threshold: typing.Optional[float] = None - rect_persepective_transform: typing.Optional[bool] = None - reflection_opacity: typing.Optional[float] = None - obj_scale: typing.Optional[float] = None - obj_pos_x: typing.Optional[float] = None - obj_pos_y: typing.Optional[float] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/image_segmentation_page_status_response.py b/src/gooey/types/image_segmentation_page_status_response.py index 85decb2..6ea0ca9 100644 --- a/src/gooey/types/image_segmentation_page_status_response.py +++ b/src/gooey/types/image_segmentation_page_status_response.py @@ -25,7 +25,7 @@ class ImageSegmentationPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/img2img_page_request.py b/src/gooey/types/img2img_page_request.py deleted file mode 100644 index 818cecb..0000000 --- 
a/src/gooey/types/img2img_page_request.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel -from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class Img2ImgPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_image: str - text_prompt: typing.Optional[str] = None - selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = None - selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = None - negative_prompt: typing.Optional[str] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - output_width: typing.Optional[int] = None - output_height: typing.Optional[int] = None - guidance_scale: typing.Optional[float] = None - prompt_strength: typing.Optional[float] = None - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None - seed: typing.Optional[int] = None - image_guidance_scale: typing.Optional[float] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/img2img_page_status_response.py b/src/gooey/types/img2img_page_status_response.py index 27ac1cf..811697d 100644 --- 
a/src/gooey/types/img2img_page_status_response.py +++ b/src/gooey/types/img2img_page_status_response.py @@ -25,7 +25,7 @@ class Img2ImgPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/letter_writer_page_status_response.py b/src/gooey/types/letter_writer_page_status_response.py index 66b191b..63e8505 100644 --- a/src/gooey/types/letter_writer_page_status_response.py +++ b/src/gooey/types/letter_writer_page_status_response.py @@ -25,7 +25,7 @@ class LetterWriterPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/lipsync_page_request.py b/src/gooey/types/lipsync_page_request.py deleted file mode 100644 index 89840ab..0000000 --- a/src/gooey/types/lipsync_page_request.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings -from .sad_talker_settings import SadTalkerSettings - - -class LipsyncPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_face: typing.Optional[str] = None - face_padding_top: typing.Optional[int] = None - face_padding_bottom: typing.Optional[int] = None - face_padding_left: typing.Optional[int] = None - face_padding_right: typing.Optional[int] = None - sadtalker_settings: typing.Optional[SadTalkerSettings] = None - selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = None - input_audio: typing.Optional[str] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_page_status_response.py b/src/gooey/types/lipsync_page_status_response.py index 249e5a1..7060e62 100644 --- a/src/gooey/types/lipsync_page_status_response.py +++ b/src/gooey/types/lipsync_page_status_response.py @@ -25,7 +25,7 @@ class LipsyncPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py deleted file mode 100644 index 31cdcd5..0000000 --- 
a/src/gooey/types/lipsync_tts_page_request.py +++ /dev/null @@ -1,63 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel -from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName -from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel -from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider -from .recipe_function import RecipeFunction -from .run_settings import RunSettings -from .sad_talker_settings import SadTalkerSettings - - -class LipsyncTtsPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - text_prompt: str - tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = None - uberduck_voice_name: typing.Optional[str] = None - uberduck_speaking_rate: typing.Optional[float] = None - google_voice_name: typing.Optional[str] = None - google_speaking_rate: typing.Optional[float] = None - google_pitch: typing.Optional[float] = None - bark_history_prompt: typing.Optional[str] = None - elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None) - """ - Use `elevenlabs_voice_id` instead - """ - - elevenlabs_api_key: typing.Optional[str] = None - elevenlabs_voice_id: typing.Optional[str] = None - elevenlabs_model: typing.Optional[str] = None - elevenlabs_stability: typing.Optional[float] = None - elevenlabs_similarity_boost: typing.Optional[float] = None - elevenlabs_style: typing.Optional[float] = None - elevenlabs_speaker_boost: typing.Optional[bool] = None - azure_voice_name: typing.Optional[str] = None - 
openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = None - openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = None - input_face: typing.Optional[str] = None - face_padding_top: typing.Optional[int] = None - face_padding_bottom: typing.Optional[int] = None - face_padding_left: typing.Optional[int] = None - face_padding_right: typing.Optional[int] = None - sadtalker_settings: typing.Optional[SadTalkerSettings] = None - selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_tts_page_status_response.py b/src/gooey/types/lipsync_tts_page_status_response.py index 25e7614..6cb65b3 100644 --- a/src/gooey/types/lipsync_tts_page_status_response.py +++ b/src/gooey/types/lipsync_tts_page_status_response.py @@ -25,7 +25,7 @@ class LipsyncTtsPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/object_inpainting_page_request.py b/src/gooey/types/object_inpainting_page_request.py deleted file mode 100644 index 3b1cbc5..0000000 --- a/src/gooey/types/object_inpainting_page_request.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class ObjectInpaintingPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_image: str - text_prompt: str - obj_scale: typing.Optional[float] = None - obj_pos_x: typing.Optional[float] = None - obj_pos_y: typing.Optional[float] = None - mask_threshold: typing.Optional[float] = None - selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = None - negative_prompt: typing.Optional[str] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - output_width: typing.Optional[int] = None - output_height: typing.Optional[int] = None - guidance_scale: typing.Optional[float] = None - sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) - seed: typing.Optional[int] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/object_inpainting_page_status_response.py b/src/gooey/types/object_inpainting_page_status_response.py index fd2e180..96da6e3 100644 --- a/src/gooey/types/object_inpainting_page_status_response.py +++ b/src/gooey/types/object_inpainting_page_status_response.py @@ -25,7 +25,7 @@ class ObjectInpaintingPageStatusResponse(UniversalBaseModel): Time when the run was created as 
ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/qr_code_generator_page_request.py b/src/gooey/types/qr_code_generator_page_request.py deleted file mode 100644 index 6ebb5c4..0000000 --- a/src/gooey/types/qr_code_generator_page_request.py +++ /dev/null @@ -1,67 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .qr_code_generator_page_request_image_prompt_controlnet_models_item import ( - QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, -) -from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler -from .qr_code_generator_page_request_selected_controlnet_model_item import ( - QrCodeGeneratorPageRequestSelectedControlnetModelItem, -) -from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings -from .vcard import Vcard - - -class QrCodeGeneratorPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - qr_code_data: typing.Optional[str] = None - qr_code_input_image: typing.Optional[str] = None - qr_code_vcard: typing.Optional[Vcard] = None - qr_code_file: typing.Optional[str] = None - use_url_shortener: typing.Optional[bool] = None - text_prompt: str - negative_prompt: typing.Optional[str] = None - image_prompt: typing.Optional[str] = None - image_prompt_controlnet_models: typing.Optional[ - typing.List[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] - ] = None - image_prompt_strength: typing.Optional[float] = None - 
image_prompt_scale: typing.Optional[float] = None - image_prompt_pos_x: typing.Optional[float] = None - image_prompt_pos_y: typing.Optional[float] = None - selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = None - selected_controlnet_model: typing.Optional[ - typing.List[QrCodeGeneratorPageRequestSelectedControlnetModelItem] - ] = None - output_width: typing.Optional[int] = None - output_height: typing.Optional[int] = None - guidance_scale: typing.Optional[float] = None - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = None - seed: typing.Optional[int] = None - obj_scale: typing.Optional[float] = None - obj_pos_x: typing.Optional[float] = None - obj_pos_y: typing.Optional[float] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/qr_code_generator_page_status_response.py b/src/gooey/types/qr_code_generator_page_status_response.py index d0fa01a..6d89074 100644 --- a/src/gooey/types/qr_code_generator_page_status_response.py +++ b/src/gooey/types/qr_code_generator_page_status_response.py @@ -25,7 +25,7 @@ class QrCodeGeneratorPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/related_qn_a_doc_page_request.py b/src/gooey/types/related_qn_a_doc_page_request.py deleted file mode 100644 index 4141a6b..0000000 --- a/src/gooey/types/related_qn_a_doc_page_request.py +++ /dev/null @@ -1,69 +0,0 @@ -# This file was 
auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .recipe_function import RecipeFunction -from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle -from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel -from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery -from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel -from .run_settings import RunSettings -from .serp_search_location import SerpSearchLocation -from .serp_search_type import SerpSearchType - - -class RelatedQnADocPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - search_query: str - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = None - documents: typing.Optional[typing.List[str]] = None - max_references: typing.Optional[int] = None - max_context_words: typing.Optional[int] = None - scroll_jump: typing.Optional[int] = None - doc_extract_url: typing.Optional[str] = None - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = None - dense_weight: typing.Optional[float] = pydantic.Field(default=None) - """ - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- """ - - task_instructions: typing.Optional[str] = None - query_instructions: typing.Optional[str] = None - selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = None - serp_search_location: typing.Optional[SerpSearchLocation] = None - scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_location` instead - """ - - serp_search_type: typing.Optional[SerpSearchType] = None - scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_type` instead - """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py b/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py new file mode 100644 index 0000000..c65a896 --- /dev/null +++ b/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +RelatedQnADocPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/related_qn_a_doc_page_request_selected_model.py b/src/gooey/types/related_qn_a_doc_page_request_selected_model.py index b642574..2591cf1 100644 --- a/src/gooey/types/related_qn_a_doc_page_request_selected_model.py +++ b/src/gooey/types/related_qn_a_doc_page_request_selected_model.py @@ -5,6 +5,8 @@ RelatedQnADocPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git a/src/gooey/types/related_qn_a_doc_page_status_response.py b/src/gooey/types/related_qn_a_doc_page_status_response.py index 6e84f56..9a269fa 100644 --- a/src/gooey/types/related_qn_a_doc_page_status_response.py +++ b/src/gooey/types/related_qn_a_doc_page_status_response.py @@ -25,7 +25,7 @@ class RelatedQnADocPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/related_qn_a_page_request.py b/src/gooey/types/related_qn_a_page_request.py deleted file mode 100644 index 8f8d262..0000000 --- a/src/gooey/types/related_qn_a_page_request.py +++ /dev/null @@ -1,65 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .recipe_function import RecipeFunction -from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel -from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel -from .run_settings import RunSettings -from .serp_search_location import SerpSearchLocation -from .serp_search_type import SerpSearchType - - -class RelatedQnAPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - serp_search_location: typing.Optional[SerpSearchLocation] = None - scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_location` instead - """ - - serp_search_type: typing.Optional[SerpSearchType] = None - scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_type` instead - """ - - search_query: str - site_filter: str - task_instructions: typing.Optional[str] = None - query_instructions: typing.Optional[str] = None - selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - max_search_urls: typing.Optional[int] = None - max_references: typing.Optional[int] = None - max_context_words: typing.Optional[int] = None - scroll_jump: typing.Optional[int] = None - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = None - dense_weight: typing.Optional[float] = pydantic.Field(default=None) - """ - Weightage for dense 
vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/related_qn_a_page_request_response_format_type.py b/src/gooey/types/related_qn_a_page_request_response_format_type.py new file mode 100644 index 0000000..7bada87 --- /dev/null +++ b/src/gooey/types/related_qn_a_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RelatedQnAPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/related_qn_a_page_request_selected_model.py b/src/gooey/types/related_qn_a_page_request_selected_model.py index 72c52e8..211bdbc 100644 --- a/src/gooey/types/related_qn_a_page_request_selected_model.py +++ b/src/gooey/types/related_qn_a_page_request_selected_model.py @@ -5,6 +5,8 @@ RelatedQnAPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff 
--git a/src/gooey/types/related_qn_a_page_status_response.py b/src/gooey/types/related_qn_a_page_status_response.py index 4f0d333..f6a20eb 100644 --- a/src/gooey/types/related_qn_a_page_status_response.py +++ b/src/gooey/types/related_qn_a_page_status_response.py @@ -25,7 +25,7 @@ class RelatedQnAPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/seo_summary_page_request.py b/src/gooey/types/seo_summary_page_request.py deleted file mode 100644 index 91a71dd..0000000 --- a/src/gooey/types/seo_summary_page_request.py +++ /dev/null @@ -1,51 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .run_settings import RunSettings -from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel -from .serp_search_location import SerpSearchLocation -from .serp_search_type import SerpSearchType - - -class SeoSummaryPageRequest(UniversalBaseModel): - serp_search_location: typing.Optional[SerpSearchLocation] = None - scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_location` instead - """ - - serp_search_type: typing.Optional[SerpSearchType] = None - scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) - """ - DEPRECATED: use `serp_search_type` instead - """ - - search_query: str - keywords: str - title: str - company_url: str - task_instructions: typing.Optional[str] = None - enable_html: typing.Optional[bool] = None - selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = None - sampling_temperature: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - num_outputs: typing.Optional[int] = None - quality: 
typing.Optional[float] = None - avoid_repetition: typing.Optional[bool] = None - max_search_urls: typing.Optional[int] = None - enable_crosslinks: typing.Optional[bool] = None - seed: typing.Optional[int] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/seo_summary_page_request_response_format_type.py b/src/gooey/types/seo_summary_page_request_response_format_type.py new file mode 100644 index 0000000..26f948b --- /dev/null +++ b/src/gooey/types/seo_summary_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SeoSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/seo_summary_page_request_selected_model.py b/src/gooey/types/seo_summary_page_request_selected_model.py index dd97fe4..7030bfd 100644 --- a/src/gooey/types/seo_summary_page_request_selected_model.py +++ b/src/gooey/types/seo_summary_page_request_selected_model.py @@ -5,6 +5,8 @@ SeoSummaryPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git 
a/src/gooey/types/seo_summary_page_status_response.py b/src/gooey/types/seo_summary_page_status_response.py index 0624e59..e12a38a 100644 --- a/src/gooey/types/seo_summary_page_status_response.py +++ b/src/gooey/types/seo_summary_page_status_response.py @@ -25,7 +25,7 @@ class SeoSummaryPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/serp_search_location.py b/src/gooey/types/serp_search_location.py index cdabfea..9b64ad9 100644 --- a/src/gooey/types/serp_search_location.py +++ b/src/gooey/types/serp_search_location.py @@ -189,7 +189,7 @@ "vc", "ws", "sm", - "st", + "gui", "sa", "sn", "rs", diff --git a/src/gooey/types/smart_gpt_page_request.py b/src/gooey/types/smart_gpt_page_request.py deleted file mode 100644 index 371b0d1..0000000 --- a/src/gooey/types/smart_gpt_page_request.py +++ /dev/null @@ -1,39 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings -from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel - - -class SmartGptPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_prompt: str - cot_prompt: typing.Optional[str] = None - reflexion_prompt: typing.Optional[str] = None - dera_prompt: typing.Optional[str] = None - selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = None - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/smart_gpt_page_status_response.py b/src/gooey/types/smart_gpt_page_status_response.py index bdaa86d..51366a2 100644 --- a/src/gooey/types/smart_gpt_page_status_response.py +++ b/src/gooey/types/smart_gpt_page_status_response.py @@ -25,7 +25,7 @@ class SmartGptPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/social_lookup_email_page_request.py b/src/gooey/types/social_lookup_email_page_request.py deleted file mode 100644 index e51cf27..0000000 --- 
a/src/gooey/types/social_lookup_email_page_request.py +++ /dev/null @@ -1,37 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings -from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel - - -class SocialLookupEmailPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - email_address: str - input_prompt: typing.Optional[str] = None - selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = None - num_outputs: typing.Optional[int] = None - avoid_repetition: typing.Optional[bool] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/social_lookup_email_page_request_response_format_type.py b/src/gooey/types/social_lookup_email_page_request_response_format_type.py new file mode 100644 index 0000000..46c50db --- /dev/null +++ b/src/gooey/types/social_lookup_email_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +SocialLookupEmailPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/social_lookup_email_page_request_selected_model.py b/src/gooey/types/social_lookup_email_page_request_selected_model.py index 41c39fd..1a0cba7 100644 --- a/src/gooey/types/social_lookup_email_page_request_selected_model.py +++ b/src/gooey/types/social_lookup_email_page_request_selected_model.py @@ -5,6 +5,8 @@ SocialLookupEmailPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", "gpt_4_turbo_vision", "gpt_4_vision", "gpt_4_turbo", @@ -14,10 +16,14 @@ "gpt_3_5_turbo_16k", "gpt_3_5_turbo_instruct", "llama3_70b", + "llama_3_groq_70b_tool_use", "llama3_8b", + "llama_3_groq_8b_tool_use", "llama2_70b_chat", "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", "gemma_7b_it", + "gemini_1_5_flash", "gemini_1_5_pro", "gemini_1_pro_vision", "gemini_1_pro", @@ -28,6 +34,8 @@ "claude_3_sonnet", "claude_3_haiku", "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", "text_davinci_003", "text_davinci_002", "code_davinci_002", diff --git a/src/gooey/types/social_lookup_email_page_status_response.py b/src/gooey/types/social_lookup_email_page_status_response.py index 682ce31..45899a5 100644 --- a/src/gooey/types/social_lookup_email_page_status_response.py +++ b/src/gooey/types/social_lookup_email_page_status_response.py @@ -25,7 +25,7 @@ class SocialLookupEmailPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/text2audio_page_request.py b/src/gooey/types/text2audio_page_request.py deleted file mode 100644 index f549c7e..0000000 --- a/src/gooey/types/text2audio_page_request.py +++ /dev/null @@ -1,37 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings - - -class Text2AudioPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - text_prompt: str - negative_prompt: typing.Optional[str] = None - duration_sec: typing.Optional[float] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[int] = None - guidance_scale: typing.Optional[float] = None - seed: typing.Optional[int] = None - sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) - selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/text2audio_page_status_response.py b/src/gooey/types/text2audio_page_status_response.py index 1f88ce5..e7fb600 100644 --- a/src/gooey/types/text2audio_page_status_response.py +++ b/src/gooey/types/text2audio_page_status_response.py @@ -25,7 +25,7 @@ class Text2AudioPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/text_to_speech_page_request.py b/src/gooey/types/text_to_speech_page_request.py deleted file mode 100644 index bdd5d95..0000000 --- a/src/gooey/types/text_to_speech_page_request.py +++ /dev/null @@ -1,54 +0,0 @@ -# This 
file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings -from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel -from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName -from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider - - -class TextToSpeechPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - text_prompt: str - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = None - uberduck_voice_name: typing.Optional[str] = None - uberduck_speaking_rate: typing.Optional[float] = None - google_voice_name: typing.Optional[str] = None - google_speaking_rate: typing.Optional[float] = None - google_pitch: typing.Optional[float] = None - bark_history_prompt: typing.Optional[str] = None - elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None) - """ - Use `elevenlabs_voice_id` instead - """ - - elevenlabs_api_key: typing.Optional[str] = None - elevenlabs_voice_id: typing.Optional[str] = None - elevenlabs_model: typing.Optional[str] = None - elevenlabs_stability: typing.Optional[float] = None - elevenlabs_similarity_boost: typing.Optional[float] = None - elevenlabs_style: typing.Optional[float] = None - elevenlabs_speaker_boost: typing.Optional[bool] = None - azure_voice_name: typing.Optional[str] = None - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = None - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = None - settings: typing.Optional[RunSettings] = 
None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/text_to_speech_page_status_response.py b/src/gooey/types/text_to_speech_page_status_response.py index 814f290..a8daeba 100644 --- a/src/gooey/types/text_to_speech_page_status_response.py +++ b/src/gooey/types/text_to_speech_page_status_response.py @@ -25,7 +25,7 @@ class TextToSpeechPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/translation_page_request.py b/src/gooey/types/translation_page_request.py deleted file mode 100644 index 2c0f394..0000000 --- a/src/gooey/types/translation_page_request.py +++ /dev/null @@ -1,39 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .recipe_function import RecipeFunction -from .run_settings import RunSettings -from .translation_page_request_selected_model import TranslationPageRequestSelectedModel - - -class TranslationPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - texts: typing.Optional[typing.List[str]] = None - selected_model: typing.Optional[TranslationPageRequestSelectedModel] = None - translation_source: typing.Optional[str] = None - translation_target: typing.Optional[str] = None - glossary_document: typing.Optional[str] = pydantic.Field(default=None) - """ - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
- """ - - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/translation_page_status_response.py b/src/gooey/types/translation_page_status_response.py index 59a9838..0f94d1b 100644 --- a/src/gooey/types/translation_page_status_response.py +++ b/src/gooey/types/translation_page_status_response.py @@ -25,7 +25,7 @@ class TranslationPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py deleted file mode 100644 index fd0c6ab..0000000 --- a/src/gooey/types/video_bots_page_request.py +++ /dev/null @@ -1,138 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .conversation_entry import ConversationEntry -from .llm_tools import LlmTools -from .recipe_function import RecipeFunction -from .run_settings import RunSettings -from .sad_talker_settings import SadTalkerSettings -from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel -from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle -from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel -from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel -from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel -from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName -from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel -from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel -from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider - - -class VideoBotsPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Variables to be used as Jinja prompt templates and in functions as arguments - """ - - input_prompt: typing.Optional[str] = None - input_audio: typing.Optional[str] = None - input_images: typing.Optional[typing.List[str]] = None - input_documents: typing.Optional[typing.List[str]] = None - doc_extract_url: typing.Optional[str] = pydantic.Field(default=None) - """ - Select a workflow to extract text from documents and images. 
- """ - - messages: typing.Optional[typing.List[ConversationEntry]] = None - bot_script: typing.Optional[str] = None - selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = None - document_model: typing.Optional[str] = pydantic.Field(default=None) - """ - When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - """ - - avoid_repetition: typing.Optional[bool] = None - num_outputs: typing.Optional[int] = None - quality: typing.Optional[float] = None - max_tokens: typing.Optional[int] = None - sampling_temperature: typing.Optional[float] = None - task_instructions: typing.Optional[str] = None - query_instructions: typing.Optional[str] = None - keyword_instructions: typing.Optional[str] = None - documents: typing.Optional[typing.List[str]] = None - max_references: typing.Optional[int] = None - max_context_words: typing.Optional[int] = None - scroll_jump: typing.Optional[int] = None - embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = None - dense_weight: typing.Optional[float] = pydantic.Field(default=None) - """ - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - """ - - citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None - use_url_shortener: typing.Optional[bool] = None - asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = pydantic.Field(default=None) - """ - Choose a model to transcribe incoming audio messages to text. - """ - - asr_language: typing.Optional[str] = pydantic.Field(default=None) - """ - Choose a language to transcribe incoming audio messages to text. 
- """ - - translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = None - user_language: typing.Optional[str] = pydantic.Field(default=None) - """ - Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - """ - - input_glossary_document: typing.Optional[str] = pydantic.Field(default=None) - """ - Translation Glossary for User Langauge -> LLM Language (English) - """ - - output_glossary_document: typing.Optional[str] = pydantic.Field(default=None) - """ - Translation Glossary for LLM Language (English) -> User Langauge - """ - - lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = None - tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None) - """ - Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - """ - - tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = None - uberduck_voice_name: typing.Optional[str] = None - uberduck_speaking_rate: typing.Optional[float] = None - google_voice_name: typing.Optional[str] = None - google_speaking_rate: typing.Optional[float] = None - google_pitch: typing.Optional[float] = None - bark_history_prompt: typing.Optional[str] = None - elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None) - """ - Use `elevenlabs_voice_id` instead - """ - - elevenlabs_api_key: typing.Optional[str] = None - elevenlabs_voice_id: typing.Optional[str] = None - elevenlabs_model: typing.Optional[str] = None - elevenlabs_stability: typing.Optional[float] = None - elevenlabs_similarity_boost: typing.Optional[float] = None - elevenlabs_style: typing.Optional[float] = None - elevenlabs_speaker_boost: typing.Optional[bool] = None - azure_voice_name: typing.Optional[str] = None - openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = None - openai_tts_model: 
typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = None - input_face: typing.Optional[str] = None - face_padding_top: typing.Optional[int] = None - face_padding_bottom: typing.Optional[int] = None - face_padding_left: typing.Optional[int] = None - face_padding_right: typing.Optional[int] = None - sadtalker_settings: typing.Optional[SadTalkerSettings] = None - settings: typing.Optional[RunSettings] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/video_bots_page_status_response.py b/src/gooey/types/video_bots_page_status_response.py index d3d0021..d6ce8f2 100644 --- a/src/gooey/types/video_bots_page_status_response.py +++ b/src/gooey/types/video_bots_page_status_response.py @@ -25,7 +25,7 @@ class VideoBotsPageStatusResponse(UniversalBaseModel): Time when the run was created as ISO format """ - run_time_sec: int = pydantic.Field() + run_time_sec: float = pydantic.Field() """ Total run time in seconds """ diff --git a/src/gooey/web_search_gpt3/__init__.py b/src/gooey/web_search_gpt3/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/web_search_gpt3/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/web_search_gpt3/client.py b/src/gooey/web_search_gpt3/client.py deleted file mode 100644 index b9ba2ca..0000000 --- a/src/gooey/web_search_gpt3/client.py +++ /dev/null @@ -1,788 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3 -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel -from ..types.google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel -from ..types.google_gpt_page_response import GoogleGptPageResponse -from ..types.google_gpt_page_status_response import GoogleGptPageStatusResponse -from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from ..types.serp_search_location import SerpSearchLocation -from ..types.serp_search_type import SerpSearchType - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class WebSearchGpt3Client: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def google_gpt( - self, - *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> GoogleGptPageResponse: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : 
typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - GoogleGptPageResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.web_search_gpt3.google_gpt( - search_query="search_query", - site_filter="site_filter", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v2/google-gpt/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def async_google_gpt( - self, - *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be 
used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.web_search_gpt3.async_google_gpt( - search_query="search_query", - site_filter="site_filter", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/google-gpt/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, 
object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_google_gpt( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleGptPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - GoogleGptPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - client.web_search_gpt3.status_google_gpt( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/google-gpt/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleGptPageStatusResponse, parse_obj_as(type_=GoogleGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncWebSearchGpt3Client: - def __init__(self, *, client_wrapper: 
AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def google_gpt( - self, - *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> GoogleGptPageResponse: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` 
instead - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - GoogleGptPageResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.web_search_gpt3.google_gpt( - search_query="search_query", - site_filter="site_filter", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v2/google-gpt/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise 
TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def async_google_gpt( - self, - *, - search_query: str, - site_filter: str, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None - ) -> AsyncApiResponseModelV3: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - functions : 
typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncApiResponseModelV3 - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.web_search_gpt3.async_google_gpt( - search_query="search_query", - site_filter="site_filter", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/google-gpt/async/", - method="POST", - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsyncApiResponseModelV3, parse_obj_as(type_=AsyncApiResponseModelV3, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise 
TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_google_gpt( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleGptPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - GoogleGptPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - authorization="YOUR_AUTHORIZATION", - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.web_search_gpt3.status_google_gpt( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/google-gpt/status/", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleGptPageStatusResponse, parse_obj_as(type_=GoogleGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise 
ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json)