From 42e8681d4db6461e04449bd515344f7ab3254a7f Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Tue, 10 Sep 2024 11:39:47 +0000
Subject: [PATCH] Release 0.0.1-beta6
---
README.md | 22 +-
poetry.lock | 197 +-
pyproject.toml | 2 +-
reference.md | 8098 +++++++++++++++--
src/gooey/__init__.py | 570 +-
src/gooey/ai_animation_generator/__init__.py | 2 +
src/gooey/ai_animation_generator/client.py | 133 +
src/gooey/ai_art_qr_code/__init__.py | 2 +
src/gooey/ai_art_qr_code/client.py | 133 +
src/gooey/ai_background_changer/__init__.py | 2 +
src/gooey/ai_background_changer/client.py | 133 +
.../__init__.py | 2 +
.../client.py | 133 +
src/gooey/ai_image_with_a_face/__init__.py | 2 +
src/gooey/ai_image_with_a_face/client.py | 133 +
src/gooey/bulk_runner/client.py | 65 +-
src/gooey/chyron_plant_bot/__init__.py | 2 +
src/gooey/chyron_plant_bot/client.py | 133 +
src/gooey/client.py | 5541 +++++++++--
.../compare_ai_image_generators/__init__.py | 2 +
.../compare_ai_image_generators/client.py | 133 +
.../compare_ai_image_upscalers/__init__.py | 2 +
.../compare_ai_image_upscalers/client.py | 133 +
src/gooey/compare_ai_translations/__init__.py | 2 +
src/gooey/compare_ai_translations/client.py | 133 +
.../compare_ai_voice_generators/__init__.py | 2 +
.../compare_ai_voice_generators/client.py | 133 +
.../copilot_for_your_enterprise/__init__.py | 25 +
.../copilot_for_your_enterprise/client.py | 682 +-
.../types/__init__.py | 25 +
...sync_form_video_bots_request_asr_model.py} | 2 +-
...form_video_bots_request_citation_style.py} | 2 +-
...form_video_bots_request_embedding_model.py | 18 +
...c_form_video_bots_request_lipsync_model.py | 5 +
...orm_video_bots_request_openai_tts_model.py | 5 +
...m_video_bots_request_openai_voice_name.py} | 2 +-
...video_bots_request_response_format_type.py | 5 +
..._form_video_bots_request_selected_model.py | 47 +
...rm_video_bots_request_translation_model.py | 5 +
...c_form_video_bots_request_tts_provider.py} | 2 +-
src/gooey/copilot_integrations/__init__.py | 40 +-
src/gooey/copilot_integrations/client.py | 350 +-
.../copilot_integrations/types/__init__.py | 40 +-
.../create_stream_request_lipsync_model.py | 5 -
...ate_stream_request_response_format_type.py | 5 -
...create_stream_request_translation_model.py | 5 -
...o_bots_stream_create_request_asr_model.py} | 2 +-
...s_stream_create_request_citation_style.py} | 2 +-
...s_stream_create_request_embedding_model.py | 18 +
...ots_stream_create_request_lipsync_model.py | 5 +
..._stream_create_request_openai_tts_model.py | 5 +
...tream_create_request_openai_voice_name.py} | 2 +-
...am_create_request_response_format_type.py} | 2 +-
...ts_stream_create_request_selected_model.py | 47 +
...stream_create_request_translation_model.py | 5 +
...ots_stream_create_request_tts_provider.py} | 2 +-
src/gooey/core/client_wrapper.py | 2 +-
.../__init__.py | 2 +
.../client.py | 133 +
.../edit_an_image_with_ai_prompt/__init__.py | 2 +
.../edit_an_image_with_ai_prompt/client.py | 133 +
src/gooey/embeddings/client.py | 65 +-
src/gooey/evaluator/__init__.py | 3 +
src/gooey/evaluator/client.py | 297 +-
src/gooey/evaluator/types/__init__.py | 6 +
...bulk_eval_request_response_format_type.py} | 2 +-
...c_form_bulk_eval_request_selected_model.py | 47 +
src/gooey/functions/client.py | 124 +-
.../__init__.py | 2 +
.../client.py | 133 +
.../__init__.py | 2 +
.../client.py | 133 +
.../large_language_models_gpt3/__init__.py | 2 +
.../large_language_models_gpt3/client.py | 133 +
src/gooey/letter_writer/__init__.py | 2 +
src/gooey/letter_writer/client.py | 133 +
src/gooey/lip_syncing/__init__.py | 3 +
src/gooey/lip_syncing/client.py | 247 +-
src/gooey/lip_syncing/types/__init__.py | 5 +
...ync_form_lipsync_request_selected_model.py | 5 +
.../lipsync_video_with_any_text/__init__.py | 2 +
.../lipsync_video_with_any_text/client.py | 133 +
src/gooey/misc/client.py | 34 +-
.../__init__.py | 2 +
.../client.py | 133 +
.../__init__.py | 2 +
.../client.py | 133 +
.../__init__.py | 2 +
.../client.py | 133 +
.../search_your_docs_with_gpt/__init__.py | 2 +
src/gooey/search_your_docs_with_gpt/client.py | 133 +
src/gooey/smart_gpt/__init__.py | 3 +
src/gooey/smart_gpt/client.py | 229 +-
src/gooey/smart_gpt/types/__init__.py | 6 +
..._smart_gpt_request_response_format_type.py | 5 +
...c_form_smart_gpt_request_selected_model.py | 47 +
.../__init__.py | 2 +
.../speech_recognition_translation/client.py | 133 +
.../summarize_your_docs_with_gpt/__init__.py | 2 +
.../summarize_your_docs_with_gpt/client.py | 133 +
.../__init__.py | 2 +
.../client.py | 133 +
.../text_guided_audio_generator/__init__.py | 2 +
.../text_guided_audio_generator/client.py | 133 +
src/gooey/types/__init__.py | 452 +-
.../types/animate_request_selected_model.py | 5 +
src/gooey/types/asr_page_request.py | 49 -
.../types/asr_page_request_output_format.py | 5 -
.../asr_page_request_translation_model.py | 5 -
src/gooey/types/asr_page_response.py | 39 -
.../types/body_async_form_art_qr_code.py | 20 -
src/gooey/types/body_async_form_asr.py | 20 -
src/gooey/types/body_async_form_bulk_eval.py | 20 -
.../types/body_async_form_bulk_runner.py | 20 -
.../types/body_async_form_chyron_plant.py | 20 -
.../body_async_form_compare_ai_upscalers.py | 20 -
.../types/body_async_form_compare_llm.py | 20 -
.../types/body_async_form_compare_text2img.py | 20 -
src/gooey/types/body_async_form_deforum_sd.py | 20 -
.../types/body_async_form_doc_extract.py | 20 -
src/gooey/types/body_async_form_doc_search.py | 20 -
.../types/body_async_form_doc_summary.py | 20 -
.../body_async_form_email_face_inpainting.py | 20 -
src/gooey/types/body_async_form_embeddings.py | 20 -
.../types/body_async_form_face_inpainting.py | 20 -
src/gooey/types/body_async_form_functions.py | 20 -
src/gooey/types/body_async_form_google_gpt.py | 20 -
.../types/body_async_form_google_image_gen.py | 20 -
.../body_async_form_image_segmentation.py | 20 -
src/gooey/types/body_async_form_img2img.py | 20 -
.../types/body_async_form_letter_writer.py | 20 -
src/gooey/types/body_async_form_lipsync.py | 20 -
.../types/body_async_form_lipsync_tts.py | 20 -
.../body_async_form_object_inpainting.py | 20 -
.../body_async_form_related_qna_maker.py | 20 -
.../body_async_form_related_qna_maker_doc.py | 20 -
.../types/body_async_form_seo_summary.py | 20 -
src/gooey/types/body_async_form_smart_gpt.py | 20 -
.../body_async_form_social_lookup_email.py | 20 -
src/gooey/types/body_async_form_text2audio.py | 20 -
.../types/body_async_form_text_to_speech.py | 20 -
src/gooey/types/body_async_form_translate.py | 20 -
src/gooey/types/body_async_form_video_bots.py | 20 -
src/gooey/types/bulk_eval_page_request.py | 57 -
src/gooey/types/bulk_eval_page_response.py | 39 -
src/gooey/types/bulk_runner_page_request.py | 56 -
src/gooey/types/bulk_runner_page_response.py | 39 -
src/gooey/types/chyron_plant_page_response.py | 39 -
src/gooey/types/compare_llm_page_request.py | 38 -
src/gooey/types/compare_llm_page_response.py | 39 -
.../types/compare_text2img_page_request.py | 45 -
.../types/compare_text2img_page_response.py | 39 -
.../types/compare_upscaler_page_request.py | 46 -
.../types/compare_upscaler_page_response.py | 39 -
src/gooey/types/deforum_sd_page_request.py | 42 -
.../deforum_sd_page_request_selected_model.py | 5 -
src/gooey/types/deforum_sd_page_response.py | 39 -
src/gooey/types/doc_extract_page_request.py | 49 -
...tract_page_request_response_format_type.py | 5 -
src/gooey/types/doc_extract_page_response.py | 39 -
src/gooey/types/doc_search_page_request.py | 57 -
...earch_page_request_response_format_type.py | 5 -
src/gooey/types/doc_search_page_response.py | 39 -
src/gooey/types/doc_summary_page_request.py | 44 -
...mmary_page_request_response_format_type.py | 5 -
...doc_summary_page_request_selected_model.py | 47 -
src/gooey/types/doc_summary_page_response.py | 39 -
...oc_summary_request_response_format_type.py | 5 +
...doc_summary_request_selected_asr_model.py} | 2 +-
.../doc_summary_request_selected_model.py} | 2 +-
.../email_face_inpainting_page_request.py | 52 -
..._inpainting_page_request_selected_model.py | 7 -
.../email_face_inpainting_page_response.py | 39 -
.../embed_request_selected_model.py} | 2 +-
src/gooey/types/embeddings_page_request.py | 31 -
src/gooey/types/embeddings_page_response.py | 39 -
.../types/face_inpainting_page_request.py | 43 -
.../types/face_inpainting_page_response.py | 39 -
src/gooey/types/functions_page_request.py | 31 -
src/gooey/types/functions_page_response.py | 39 -
src/gooey/types/google_gpt_page_request.py | 67 -
...e_gpt_page_request_response_format_type.py | 5 -
src/gooey/types/google_gpt_page_response.py | 39 -
.../types/google_image_gen_page_request.py | 47 -
.../types/google_image_gen_page_response.py | 39 -
...mage_from_email_request_selected_model.py} | 2 +-
...from_web_search_request_selected_model.py} | 2 +-
.../types/image_segmentation_page_request.py | 37 -
...egmentation_page_request_selected_model.py | 5 -
.../types/image_segmentation_page_response.py | 39 -
src/gooey/types/img2img_page_request.py | 44 -
src/gooey/types/img2img_page_response.py | 39 -
.../types/letter_writer_page_response.py | 39 -
src/gooey/types/lipsync_page_request.py | 38 -
.../lipsync_page_request_selected_model.py | 5 -
src/gooey/types/lipsync_page_response.py | 39 -
src/gooey/types/lipsync_tts_page_request.py | 63 -
...psync_tts_page_request_openai_tts_model.py | 5 -
...lipsync_tts_page_request_selected_model.py | 5 -
src/gooey/types/lipsync_tts_page_response.py | 39 -
.../lipsync_tts_request_openai_tts_model.py | 5 +
.../lipsync_tts_request_openai_voice_name.py} | 2 +-
.../lipsync_tts_request_selected_model.py | 5 +
.../lipsync_tts_request_tts_provider.py} | 2 +-
.../types/llm_request_response_format_type.py | 5 +
...py => llm_request_selected_models_item.py} | 2 +-
.../types/object_inpainting_page_request.py | 44 -
..._inpainting_page_request_selected_model.py | 7 -
.../types/object_inpainting_page_response.py | 39 -
...lize_email_request_response_format_type.py | 5 +
...rsonalize_email_request_selected_model.py} | 2 +-
.../types/portrait_request_selected_model.py | 5 +
.../product_image_request_selected_model.py | 5 +
.../types/qr_code_generator_page_request.py | 67 -
.../types/qr_code_generator_page_response.py | 39 -
...st_image_prompt_controlnet_models_item.py} | 2 +-
...eduler.py => qr_code_request_scheduler.py} | 2 +-
...request_selected_controlnet_model_item.py} | 2 +-
...l.py => qr_code_request_selected_model.py} | 2 +-
.../rag_request_citation_style.py} | 2 +-
...odel.py => rag_request_embedding_model.py} | 2 +-
..._query.py => rag_request_keyword_query.py} | 2 +-
.../types/rag_request_response_format_type.py | 5 +
...model.py => rag_request_selected_model.py} | 2 +-
.../types/related_qn_a_doc_page_request.py | 71 -
...d_qn_a_doc_page_request_embedding_model.py | 18 -
...ed_qn_a_doc_page_request_selected_model.py | 47 -
.../types/related_qn_a_doc_page_response.py | 39 -
src/gooey/types/related_qn_a_page_request.py | 67 -
...lated_qn_a_page_request_embedding_model.py | 18 -
..._qn_a_page_request_response_format_type.py | 5 -
...elated_qn_a_page_request_selected_model.py | 47 -
src/gooey/types/related_qn_a_page_response.py | 39 -
...mage_request_selected_controlnet_model.py} | 6 +-
...request_selected_controlnet_model_item.py} | 2 +-
... => remix_image_request_selected_model.py} | 2 +-
...emove_background_request_selected_model.py | 5 +
...eo_content_request_response_format_type.py | 5 +
... => seo_content_request_selected_model.py} | 2 +-
...le_also_ask_doc_request_citation_style.py} | 2 +-
...le_also_ask_doc_request_embedding_model.py | 18 +
...ple_also_ask_doc_request_keyword_query.py} | 2 +-
...so_ask_doc_request_response_format_type.py | 5 +
...le_also_ask_doc_request_selected_model.py} | 2 +-
...eople_also_ask_request_embedding_model.py} | 2 +-
...e_also_ask_request_response_format_type.py | 5 +
..._people_also_ask_request_selected_model.py | 47 +
src/gooey/types/seo_summary_page_request.py | 53 -
...mmary_page_request_response_format_type.py | 5 -
...seo_summary_page_request_selected_model.py | 47 -
src/gooey/types/seo_summary_page_response.py | 39 -
src/gooey/types/smart_gpt_page_request.py | 41 -
...t_gpt_page_request_response_format_type.py | 5 -
src/gooey/types/smart_gpt_page_response.py | 39 -
.../types/social_lookup_email_page_request.py | 39 -
...ookup_email_page_request_selected_model.py | 47 -
.../social_lookup_email_page_response.py | 39 -
...peech_recognition_request_output_format.py | 5 +
...ech_recognition_request_selected_model.py} | 2 +-
...h_recognition_request_translation_model.py | 5 +
...size_data_request_response_format_type.py} | 2 +-
...hesize_data_request_selected_asr_model.py} | 2 +-
...synthesize_data_request_selected_model.py} | 2 +-
src/gooey/types/text2audio_page_request.py | 37 -
src/gooey/types/text2audio_page_response.py | 39 -
....py => text_to_image_request_scheduler.py} | 2 +-
..._to_image_request_selected_models_item.py} | 2 +-
.../types/text_to_speech_page_request.py | 54 -
...to_speech_page_request_openai_tts_model.py | 5 -
.../types/text_to_speech_page_response.py | 39 -
...ext_to_speech_request_openai_tts_model.py} | 2 +-
...xt_to_speech_request_openai_voice_name.py} | 2 +-
...=> text_to_speech_request_tts_provider.py} | 2 +-
.../types/translate_request_selected_model.py | 5 +
src/gooey/types/translation_page_request.py | 39 -
...translation_page_request_selected_model.py | 5 -
src/gooey/types/translation_page_response.py | 39 -
...> upscale_request_selected_models_item.py} | 2 +-
src/gooey/types/video_bots_page_request.py | 140 -
...video_bots_page_request_embedding_model.py | 18 -
.../video_bots_page_request_lipsync_model.py | 5 -
...ideo_bots_page_request_openai_tts_model.py | 5 -
..._bots_page_request_response_format_type.py | 5 -
.../video_bots_page_request_selected_model.py | 47 -
...deo_bots_page_request_translation_model.py | 5 -
src/gooey/types/video_bots_page_response.py | 39 -
...web_search_llm_request_embedding_model.py} | 2 +-
...earch_llm_request_response_format_type.py} | 2 +-
.../web_search_llm_request_selected_model.py | 47 +
src/gooey/web_search_gpt3/__init__.py | 2 +
src/gooey/web_search_gpt3/client.py | 133 +
291 files changed, 18457 insertions(+), 6745 deletions(-)
create mode 100644 src/gooey/ai_animation_generator/__init__.py
create mode 100644 src/gooey/ai_animation_generator/client.py
create mode 100644 src/gooey/ai_art_qr_code/__init__.py
create mode 100644 src/gooey/ai_art_qr_code/client.py
create mode 100644 src/gooey/ai_background_changer/__init__.py
create mode 100644 src/gooey/ai_background_changer/client.py
create mode 100644 src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py
create mode 100644 src/gooey/ai_generated_photo_from_email_profile_lookup/client.py
create mode 100644 src/gooey/ai_image_with_a_face/__init__.py
create mode 100644 src/gooey/ai_image_with_a_face/client.py
create mode 100644 src/gooey/chyron_plant_bot/__init__.py
create mode 100644 src/gooey/chyron_plant_bot/client.py
create mode 100644 src/gooey/compare_ai_image_generators/__init__.py
create mode 100644 src/gooey/compare_ai_image_generators/client.py
create mode 100644 src/gooey/compare_ai_image_upscalers/__init__.py
create mode 100644 src/gooey/compare_ai_image_upscalers/client.py
create mode 100644 src/gooey/compare_ai_translations/__init__.py
create mode 100644 src/gooey/compare_ai_translations/client.py
create mode 100644 src/gooey/compare_ai_voice_generators/__init__.py
create mode 100644 src/gooey/compare_ai_voice_generators/client.py
create mode 100644 src/gooey/copilot_for_your_enterprise/types/__init__.py
rename src/gooey/{copilot_integrations/types/create_stream_request_asr_model.py => copilot_for_your_enterprise/types/async_form_video_bots_request_asr_model.py} (90%)
rename src/gooey/{types/doc_search_page_request_citation_style.py => copilot_for_your_enterprise/types/async_form_video_bots_request_citation_style.py} (89%)
create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_embedding_model.py
create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_lipsync_model.py
create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_tts_model.py
rename src/gooey/{types/text_to_speech_page_request_openai_voice_name.py => copilot_for_your_enterprise/types/async_form_video_bots_request_openai_voice_name.py} (74%)
create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_response_format_type.py
create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_selected_model.py
create mode 100644 src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_translation_model.py
rename src/gooey/{types/lipsync_tts_page_request_tts_provider.py => copilot_for_your_enterprise/types/async_form_video_bots_request_tts_provider.py} (77%)
delete mode 100644 src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py
delete mode 100644 src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py
delete mode 100644 src/gooey/copilot_integrations/types/create_stream_request_translation_model.py
rename src/gooey/{types/asr_page_request_selected_model.py => copilot_integrations/types/video_bots_stream_create_request_asr_model.py} (89%)
rename src/gooey/{types/video_bots_page_request_citation_style.py => copilot_integrations/types/video_bots_stream_create_request_citation_style.py} (89%)
create mode 100644 src/gooey/copilot_integrations/types/video_bots_stream_create_request_embedding_model.py
create mode 100644 src/gooey/copilot_integrations/types/video_bots_stream_create_request_lipsync_model.py
create mode 100644 src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_tts_model.py
rename src/gooey/{types/lipsync_tts_page_request_openai_voice_name.py => copilot_integrations/types/video_bots_stream_create_request_openai_voice_name.py} (73%)
rename src/gooey/{types/social_lookup_email_page_request_response_format_type.py => copilot_integrations/types/video_bots_stream_create_request_response_format_type.py} (66%)
create mode 100644 src/gooey/copilot_integrations/types/video_bots_stream_create_request_selected_model.py
create mode 100644 src/gooey/copilot_integrations/types/video_bots_stream_create_request_translation_model.py
rename src/gooey/{types/text_to_speech_page_request_tts_provider.py => copilot_integrations/types/video_bots_stream_create_request_tts_provider.py} (76%)
create mode 100644 src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py
create mode 100644 src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py
create mode 100644 src/gooey/edit_an_image_with_ai_prompt/__init__.py
create mode 100644 src/gooey/edit_an_image_with_ai_prompt/client.py
create mode 100644 src/gooey/evaluator/types/__init__.py
rename src/gooey/{types/related_qn_a_doc_page_request_response_format_type.py => evaluator/types/async_form_bulk_eval_request_response_format_type.py} (66%)
create mode 100644 src/gooey/evaluator/types/async_form_bulk_eval_request_selected_model.py
create mode 100644 src/gooey/generate_people_also_ask_seo_content/__init__.py
create mode 100644 src/gooey/generate_people_also_ask_seo_content/client.py
create mode 100644 src/gooey/generate_product_photo_backgrounds/__init__.py
create mode 100644 src/gooey/generate_product_photo_backgrounds/client.py
create mode 100644 src/gooey/large_language_models_gpt3/__init__.py
create mode 100644 src/gooey/large_language_models_gpt3/client.py
create mode 100644 src/gooey/letter_writer/__init__.py
create mode 100644 src/gooey/letter_writer/client.py
create mode 100644 src/gooey/lip_syncing/types/__init__.py
create mode 100644 src/gooey/lip_syncing/types/async_form_lipsync_request_selected_model.py
create mode 100644 src/gooey/lipsync_video_with_any_text/__init__.py
create mode 100644 src/gooey/lipsync_video_with_any_text/client.py
create mode 100644 src/gooey/people_also_ask_answers_from_a_doc/__init__.py
create mode 100644 src/gooey/people_also_ask_answers_from_a_doc/client.py
create mode 100644 src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py
create mode 100644 src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py
create mode 100644 src/gooey/render_image_search_results_with_ai/__init__.py
create mode 100644 src/gooey/render_image_search_results_with_ai/client.py
create mode 100644 src/gooey/search_your_docs_with_gpt/__init__.py
create mode 100644 src/gooey/search_your_docs_with_gpt/client.py
create mode 100644 src/gooey/smart_gpt/types/__init__.py
create mode 100644 src/gooey/smart_gpt/types/async_form_smart_gpt_request_response_format_type.py
create mode 100644 src/gooey/smart_gpt/types/async_form_smart_gpt_request_selected_model.py
create mode 100644 src/gooey/speech_recognition_translation/__init__.py
create mode 100644 src/gooey/speech_recognition_translation/client.py
create mode 100644 src/gooey/summarize_your_docs_with_gpt/__init__.py
create mode 100644 src/gooey/summarize_your_docs_with_gpt/client.py
create mode 100644 src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py
create mode 100644 src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py
create mode 100644 src/gooey/text_guided_audio_generator/__init__.py
create mode 100644 src/gooey/text_guided_audio_generator/client.py
create mode 100644 src/gooey/types/animate_request_selected_model.py
delete mode 100644 src/gooey/types/asr_page_request.py
delete mode 100644 src/gooey/types/asr_page_request_output_format.py
delete mode 100644 src/gooey/types/asr_page_request_translation_model.py
delete mode 100644 src/gooey/types/asr_page_response.py
delete mode 100644 src/gooey/types/body_async_form_art_qr_code.py
delete mode 100644 src/gooey/types/body_async_form_asr.py
delete mode 100644 src/gooey/types/body_async_form_bulk_eval.py
delete mode 100644 src/gooey/types/body_async_form_bulk_runner.py
delete mode 100644 src/gooey/types/body_async_form_chyron_plant.py
delete mode 100644 src/gooey/types/body_async_form_compare_ai_upscalers.py
delete mode 100644 src/gooey/types/body_async_form_compare_llm.py
delete mode 100644 src/gooey/types/body_async_form_compare_text2img.py
delete mode 100644 src/gooey/types/body_async_form_deforum_sd.py
delete mode 100644 src/gooey/types/body_async_form_doc_extract.py
delete mode 100644 src/gooey/types/body_async_form_doc_search.py
delete mode 100644 src/gooey/types/body_async_form_doc_summary.py
delete mode 100644 src/gooey/types/body_async_form_email_face_inpainting.py
delete mode 100644 src/gooey/types/body_async_form_embeddings.py
delete mode 100644 src/gooey/types/body_async_form_face_inpainting.py
delete mode 100644 src/gooey/types/body_async_form_functions.py
delete mode 100644 src/gooey/types/body_async_form_google_gpt.py
delete mode 100644 src/gooey/types/body_async_form_google_image_gen.py
delete mode 100644 src/gooey/types/body_async_form_image_segmentation.py
delete mode 100644 src/gooey/types/body_async_form_img2img.py
delete mode 100644 src/gooey/types/body_async_form_letter_writer.py
delete mode 100644 src/gooey/types/body_async_form_lipsync.py
delete mode 100644 src/gooey/types/body_async_form_lipsync_tts.py
delete mode 100644 src/gooey/types/body_async_form_object_inpainting.py
delete mode 100644 src/gooey/types/body_async_form_related_qna_maker.py
delete mode 100644 src/gooey/types/body_async_form_related_qna_maker_doc.py
delete mode 100644 src/gooey/types/body_async_form_seo_summary.py
delete mode 100644 src/gooey/types/body_async_form_smart_gpt.py
delete mode 100644 src/gooey/types/body_async_form_social_lookup_email.py
delete mode 100644 src/gooey/types/body_async_form_text2audio.py
delete mode 100644 src/gooey/types/body_async_form_text_to_speech.py
delete mode 100644 src/gooey/types/body_async_form_translate.py
delete mode 100644 src/gooey/types/body_async_form_video_bots.py
delete mode 100644 src/gooey/types/bulk_eval_page_request.py
delete mode 100644 src/gooey/types/bulk_eval_page_response.py
delete mode 100644 src/gooey/types/bulk_runner_page_request.py
delete mode 100644 src/gooey/types/bulk_runner_page_response.py
delete mode 100644 src/gooey/types/chyron_plant_page_response.py
delete mode 100644 src/gooey/types/compare_llm_page_request.py
delete mode 100644 src/gooey/types/compare_llm_page_response.py
delete mode 100644 src/gooey/types/compare_text2img_page_request.py
delete mode 100644 src/gooey/types/compare_text2img_page_response.py
delete mode 100644 src/gooey/types/compare_upscaler_page_request.py
delete mode 100644 src/gooey/types/compare_upscaler_page_response.py
delete mode 100644 src/gooey/types/deforum_sd_page_request.py
delete mode 100644 src/gooey/types/deforum_sd_page_request_selected_model.py
delete mode 100644 src/gooey/types/deforum_sd_page_response.py
delete mode 100644 src/gooey/types/doc_extract_page_request.py
delete mode 100644 src/gooey/types/doc_extract_page_request_response_format_type.py
delete mode 100644 src/gooey/types/doc_extract_page_response.py
delete mode 100644 src/gooey/types/doc_search_page_request.py
delete mode 100644 src/gooey/types/doc_search_page_request_response_format_type.py
delete mode 100644 src/gooey/types/doc_search_page_response.py
delete mode 100644 src/gooey/types/doc_summary_page_request.py
delete mode 100644 src/gooey/types/doc_summary_page_request_response_format_type.py
delete mode 100644 src/gooey/types/doc_summary_page_request_selected_model.py
delete mode 100644 src/gooey/types/doc_summary_page_response.py
create mode 100644 src/gooey/types/doc_summary_request_response_format_type.py
rename src/gooey/types/{video_bots_page_request_asr_model.py => doc_summary_request_selected_asr_model.py} (90%)
rename src/gooey/{copilot_integrations/types/create_stream_request_selected_model.py => types/doc_summary_request_selected_model.py} (95%)
delete mode 100644 src/gooey/types/email_face_inpainting_page_request.py
delete mode 100644 src/gooey/types/email_face_inpainting_page_request_selected_model.py
delete mode 100644 src/gooey/types/email_face_inpainting_page_response.py
rename src/gooey/{copilot_integrations/types/create_stream_request_embedding_model.py => types/embed_request_selected_model.py} (87%)
delete mode 100644 src/gooey/types/embeddings_page_request.py
delete mode 100644 src/gooey/types/embeddings_page_response.py
delete mode 100644 src/gooey/types/face_inpainting_page_request.py
delete mode 100644 src/gooey/types/face_inpainting_page_response.py
delete mode 100644 src/gooey/types/functions_page_request.py
delete mode 100644 src/gooey/types/functions_page_response.py
delete mode 100644 src/gooey/types/google_gpt_page_request.py
delete mode 100644 src/gooey/types/google_gpt_page_request_response_format_type.py
delete mode 100644 src/gooey/types/google_gpt_page_response.py
delete mode 100644 src/gooey/types/google_image_gen_page_request.py
delete mode 100644 src/gooey/types/google_image_gen_page_response.py
rename src/gooey/types/{face_inpainting_page_request_selected_model.py => image_from_email_request_selected_model.py} (74%)
rename src/gooey/types/{google_image_gen_page_request_selected_model.py => image_from_web_search_request_selected_model.py} (88%)
delete mode 100644 src/gooey/types/image_segmentation_page_request.py
delete mode 100644 src/gooey/types/image_segmentation_page_request_selected_model.py
delete mode 100644 src/gooey/types/image_segmentation_page_response.py
delete mode 100644 src/gooey/types/img2img_page_request.py
delete mode 100644 src/gooey/types/img2img_page_response.py
delete mode 100644 src/gooey/types/letter_writer_page_response.py
delete mode 100644 src/gooey/types/lipsync_page_request.py
delete mode 100644 src/gooey/types/lipsync_page_request_selected_model.py
delete mode 100644 src/gooey/types/lipsync_page_response.py
delete mode 100644 src/gooey/types/lipsync_tts_page_request.py
delete mode 100644 src/gooey/types/lipsync_tts_page_request_openai_tts_model.py
delete mode 100644 src/gooey/types/lipsync_tts_page_request_selected_model.py
delete mode 100644 src/gooey/types/lipsync_tts_page_response.py
create mode 100644 src/gooey/types/lipsync_tts_request_openai_tts_model.py
rename src/gooey/{copilot_integrations/types/create_stream_request_openai_voice_name.py => types/lipsync_tts_request_openai_voice_name.py} (76%)
create mode 100644 src/gooey/types/lipsync_tts_request_selected_model.py
rename src/gooey/{copilot_integrations/types/create_stream_request_tts_provider.py => types/lipsync_tts_request_tts_provider.py} (79%)
create mode 100644 src/gooey/types/llm_request_response_format_type.py
rename src/gooey/types/{smart_gpt_page_request_selected_model.py => llm_request_selected_models_item.py} (95%)
delete mode 100644 src/gooey/types/object_inpainting_page_request.py
delete mode 100644 src/gooey/types/object_inpainting_page_request_selected_model.py
delete mode 100644 src/gooey/types/object_inpainting_page_response.py
create mode 100644 src/gooey/types/personalize_email_request_response_format_type.py
rename src/gooey/types/{doc_extract_page_request_selected_model.py => personalize_email_request_selected_model.py} (95%)
create mode 100644 src/gooey/types/portrait_request_selected_model.py
create mode 100644 src/gooey/types/product_image_request_selected_model.py
delete mode 100644 src/gooey/types/qr_code_generator_page_request.py
delete mode 100644 src/gooey/types/qr_code_generator_page_response.py
rename src/gooey/types/{img2img_page_request_selected_controlnet_model_item.py => qr_code_request_image_prompt_controlnet_models_item.py} (88%)
rename src/gooey/types/{qr_code_generator_page_request_scheduler.py => qr_code_request_scheduler.py} (89%)
rename src/gooey/types/{qr_code_generator_page_request_selected_controlnet_model_item.py => qr_code_request_selected_controlnet_model_item.py} (87%)
rename src/gooey/types/{qr_code_generator_page_request_selected_model.py => qr_code_request_selected_model.py} (88%)
rename src/gooey/{copilot_integrations/types/create_stream_request_citation_style.py => types/rag_request_citation_style.py} (90%)
rename src/gooey/types/{google_gpt_page_request_embedding_model.py => rag_request_embedding_model.py} (87%)
rename src/gooey/types/{doc_search_page_request_keyword_query.py => rag_request_keyword_query.py} (52%)
create mode 100644 src/gooey/types/rag_request_response_format_type.py
rename src/gooey/types/{bulk_eval_page_request_selected_model.py => rag_request_selected_model.py} (95%)
delete mode 100644 src/gooey/types/related_qn_a_doc_page_request.py
delete mode 100644 src/gooey/types/related_qn_a_doc_page_request_embedding_model.py
delete mode 100644 src/gooey/types/related_qn_a_doc_page_request_selected_model.py
delete mode 100644 src/gooey/types/related_qn_a_doc_page_response.py
delete mode 100644 src/gooey/types/related_qn_a_page_request.py
delete mode 100644 src/gooey/types/related_qn_a_page_request_embedding_model.py
delete mode 100644 src/gooey/types/related_qn_a_page_request_response_format_type.py
delete mode 100644 src/gooey/types/related_qn_a_page_request_selected_model.py
delete mode 100644 src/gooey/types/related_qn_a_page_response.py
rename src/gooey/types/{img2img_page_request_selected_controlnet_model.py => remix_image_request_selected_controlnet_model.py} (71%)
rename src/gooey/types/{qr_code_generator_page_request_image_prompt_controlnet_models_item.py => remix_image_request_selected_controlnet_model_item.py} (86%)
rename src/gooey/types/{img2img_page_request_selected_model.py => remix_image_request_selected_model.py} (89%)
create mode 100644 src/gooey/types/remove_background_request_selected_model.py
create mode 100644 src/gooey/types/seo_content_request_response_format_type.py
rename src/gooey/types/{google_gpt_page_request_selected_model.py => seo_content_request_selected_model.py} (95%)
rename src/gooey/types/{related_qn_a_doc_page_request_citation_style.py => seo_people_also_ask_doc_request_citation_style.py} (89%)
create mode 100644 src/gooey/types/seo_people_also_ask_doc_request_embedding_model.py
rename src/gooey/types/{related_qn_a_doc_page_request_keyword_query.py => seo_people_also_ask_doc_request_keyword_query.py} (50%)
create mode 100644 src/gooey/types/seo_people_also_ask_doc_request_response_format_type.py
rename src/gooey/types/{compare_llm_page_request_selected_models_item.py => seo_people_also_ask_doc_request_selected_model.py} (95%)
rename src/gooey/types/{doc_search_page_request_embedding_model.py => seo_people_also_ask_request_embedding_model.py} (86%)
create mode 100644 src/gooey/types/seo_people_also_ask_request_response_format_type.py
create mode 100644 src/gooey/types/seo_people_also_ask_request_selected_model.py
delete mode 100644 src/gooey/types/seo_summary_page_request.py
delete mode 100644 src/gooey/types/seo_summary_page_request_response_format_type.py
delete mode 100644 src/gooey/types/seo_summary_page_request_selected_model.py
delete mode 100644 src/gooey/types/seo_summary_page_response.py
delete mode 100644 src/gooey/types/smart_gpt_page_request.py
delete mode 100644 src/gooey/types/smart_gpt_page_request_response_format_type.py
delete mode 100644 src/gooey/types/smart_gpt_page_response.py
delete mode 100644 src/gooey/types/social_lookup_email_page_request.py
delete mode 100644 src/gooey/types/social_lookup_email_page_request_selected_model.py
delete mode 100644 src/gooey/types/social_lookup_email_page_response.py
create mode 100644 src/gooey/types/speech_recognition_request_output_format.py
rename src/gooey/types/{doc_extract_page_request_selected_asr_model.py => speech_recognition_request_selected_model.py} (89%)
create mode 100644 src/gooey/types/speech_recognition_request_translation_model.py
rename src/gooey/types/{compare_llm_page_request_response_format_type.py => synthesize_data_request_response_format_type.py} (65%)
rename src/gooey/types/{doc_summary_page_request_selected_asr_model.py => synthesize_data_request_selected_asr_model.py} (89%)
rename src/gooey/types/{doc_search_page_request_selected_model.py => synthesize_data_request_selected_model.py} (95%)
delete mode 100644 src/gooey/types/text2audio_page_request.py
delete mode 100644 src/gooey/types/text2audio_page_response.py
rename src/gooey/types/{compare_text2img_page_request_scheduler.py => text_to_image_request_scheduler.py} (89%)
rename src/gooey/types/{compare_text2img_page_request_selected_models_item.py => text_to_image_request_selected_models_item.py} (87%)
delete mode 100644 src/gooey/types/text_to_speech_page_request.py
delete mode 100644 src/gooey/types/text_to_speech_page_request_openai_tts_model.py
delete mode 100644 src/gooey/types/text_to_speech_page_response.py
rename src/gooey/{copilot_integrations/types/create_stream_request_openai_tts_model.py => types/text_to_speech_request_openai_tts_model.py} (64%)
rename src/gooey/types/{video_bots_page_request_openai_voice_name.py => text_to_speech_request_openai_voice_name.py} (76%)
rename src/gooey/types/{video_bots_page_request_tts_provider.py => text_to_speech_request_tts_provider.py} (79%)
create mode 100644 src/gooey/types/translate_request_selected_model.py
delete mode 100644 src/gooey/types/translation_page_request.py
delete mode 100644 src/gooey/types/translation_page_request_selected_model.py
delete mode 100644 src/gooey/types/translation_page_response.py
rename src/gooey/types/{compare_upscaler_page_request_selected_models_item.py => upscale_request_selected_models_item.py} (74%)
delete mode 100644 src/gooey/types/video_bots_page_request.py
delete mode 100644 src/gooey/types/video_bots_page_request_embedding_model.py
delete mode 100644 src/gooey/types/video_bots_page_request_lipsync_model.py
delete mode 100644 src/gooey/types/video_bots_page_request_openai_tts_model.py
delete mode 100644 src/gooey/types/video_bots_page_request_response_format_type.py
delete mode 100644 src/gooey/types/video_bots_page_request_selected_model.py
delete mode 100644 src/gooey/types/video_bots_page_request_translation_model.py
delete mode 100644 src/gooey/types/video_bots_page_response.py
rename src/gooey/types/{embeddings_page_request_selected_model.py => web_search_llm_request_embedding_model.py} (87%)
rename src/gooey/types/{bulk_eval_page_request_response_format_type.py => web_search_llm_request_response_format_type.py} (65%)
create mode 100644 src/gooey/types/web_search_llm_request_selected_model.py
create mode 100644 src/gooey/web_search_gpt3/__init__.py
create mode 100644 src/gooey/web_search_gpt3/client.py
diff --git a/README.md b/README.md
index 020f347..278b73e 100644
--- a/README.md
+++ b/README.md
@@ -16,12 +16,19 @@ pip install gooeyai
Instantiate and use the client with the following:
```python
-from gooey import Gooey
+from gooey import AnimationPrompt, Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.animate()
+client.animate(
+ animation_prompts=[
+ AnimationPrompt(
+ frame="frame",
+ prompt="prompt",
+ )
+ ],
+)
```
## Async Client
@@ -31,7 +38,7 @@ The SDK also exports an `async` client so that you can make non-blocking calls t
```python
import asyncio
-from gooey import AsyncGooey
+from gooey import AnimationPrompt, AsyncGooey
client = AsyncGooey(
api_key="YOUR_API_KEY",
@@ -39,7 +46,14 @@ client = AsyncGooey(
async def main() -> None:
- await client.animate()
+ await client.animate(
+ animation_prompts=[
+ AnimationPrompt(
+ frame="frame",
+ prompt="prompt",
+ )
+ ],
+ )
asyncio.run(main())
diff --git a/poetry.lock b/poetry.lock
index 228c9f5..46252f8 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -236,18 +236,18 @@ testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "pydantic"
-version = "2.8.2"
+version = "2.9.1"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"},
- {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"},
+ {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"},
+ {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"},
]
[package.dependencies]
-annotated-types = ">=0.4.0"
-pydantic-core = "2.20.1"
+annotated-types = ">=0.6.0"
+pydantic-core = "2.23.3"
typing-extensions = [
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
@@ -255,103 +255,104 @@ typing-extensions = [
[package.extras]
email = ["email-validator (>=2.0.0)"]
+timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
-version = "2.20.1"
+version = "2.23.3"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"},
- {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"},
- {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"},
- {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"},
- {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"},
- {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"},
- {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"},
- {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"},
- {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"},
- {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"},
- {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"},
- {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"},
- {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"},
- {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"},
- {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"},
- {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"},
- {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"},
- {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"},
- {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"},
- {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"},
- {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"},
- {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"},
- {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"},
- {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"},
- {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"},
- {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"},
- {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"},
- {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"},
- {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"},
- {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"},
- {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"},
- {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"},
- {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"},
- {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"},
- {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"},
- {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"},
- {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"},
- {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"},
- {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"},
- {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"},
- {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"},
- {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"},
- {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"},
- {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"},
- {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"},
- {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"},
- {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"},
- {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"},
- {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"},
- {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"},
- {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"},
- {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"},
- {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"},
- {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"},
- {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"},
- {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"},
- {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"},
- {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"},
- {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"},
- {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"},
- {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"},
- {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"},
- {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"},
- {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"},
- {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"},
- {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"},
- {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"},
- {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"},
- {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"},
- {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"},
- {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"},
- {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"},
- {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"},
- {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"},
- {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"},
- {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"},
- {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"},
- {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"},
- {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"},
- {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"},
- {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"},
- {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"},
- {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"},
- {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"},
- {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"},
- {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"},
- {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"},
- {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"},
- {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"},
+ {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"},
+ {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"},
+ {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"},
+ {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"},
+ {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"},
+ {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"},
+ {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"},
+ {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"},
+ {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"},
+ {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"},
+ {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"},
+ {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"},
+ {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"},
+ {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"},
+ {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"},
+ {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"},
+ {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"},
+ {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"},
+ {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"},
+ {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"},
+ {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"},
+ {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"},
+ {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"},
+ {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"},
+ {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"},
+ {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"},
+ {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"},
+ {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"},
+ {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"},
+ {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"},
+ {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"},
+ {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"},
+ {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"},
+ {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"},
+ {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"},
+ {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"},
+ {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"},
+ {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"},
+ {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"},
+ {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"},
+ {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"},
+ {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"},
+ {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"},
+ {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"},
+ {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"},
+ {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"},
+ {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"},
+ {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"},
+ {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"},
+ {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"},
+ {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"},
+ {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"},
+ {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"},
+ {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"},
+ {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"},
+ {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"},
+ {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"},
+ {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"},
+ {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"},
+ {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"},
+ {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"},
+ {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"},
+ {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"},
+ {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"},
+ {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"},
+ {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"},
+ {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"},
+ {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"},
+ {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"},
+ {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"},
+ {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"},
+ {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"},
+ {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"},
+ {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"},
+ {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"},
+ {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"},
+ {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"},
+ {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"},
+ {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"},
+ {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"},
+ {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"},
+ {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"},
+ {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"},
+ {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"},
+ {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"},
+ {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"},
+ {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"},
+ {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"},
+ {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"},
]
[package.dependencies]
@@ -446,13 +447,13 @@ files = [
[[package]]
name = "types-python-dateutil"
-version = "2.9.0.20240821"
+version = "2.9.0.20240906"
description = "Typing stubs for python-dateutil"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-python-dateutil-2.9.0.20240821.tar.gz", hash = "sha256:9649d1dcb6fef1046fb18bebe9ea2aa0028b160918518c34589a46045f6ebd98"},
- {file = "types_python_dateutil-2.9.0.20240821-py3-none-any.whl", hash = "sha256:f5889fcb4e63ed4aaa379b44f93c32593d50b9a94c9a60a0c854d8cc3511cd57"},
+ {file = "types-python-dateutil-2.9.0.20240906.tar.gz", hash = "sha256:9706c3b68284c25adffc47319ecc7947e5bb86b3773f843c73906fd598bc176e"},
+ {file = "types_python_dateutil-2.9.0.20240906-py3-none-any.whl", hash = "sha256:27c8cc2d058ccb14946eebcaaa503088f4f6dbc4fb6093d3d456a49aef2753f6"},
]
[[package]]
diff --git a/pyproject.toml b/pyproject.toml
index 6c886fb..a05f7fb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "gooeyai"
-version = "0.0.1-beta5"
+version = "0.0.1-beta6"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 9cf98dd..746713e 100644
--- a/reference.md
+++ b/reference.md
@@ -12,12 +12,19 @@
```python
-from gooey import Gooey
+from gooey import AnimationPrompt, Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.animate()
+client.animate(
+ animation_prompts=[
+ AnimationPrompt(
+ frame="frame",
+ prompt="prompt",
+ )
+ ],
+)
```
@@ -33,7 +40,7 @@ client.animate()
-
-**example_id:** `typing.Optional[str]`
+**animation_prompts:** `typing.List[AnimationPrompt]`
@@ -41,53 +48,55 @@ client.animate()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**example_id:** `typing.Optional[str]`
-
-
+
+-
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
-
-client.qr_code(...)
-
-#### 🔌 Usage
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
-
+**max_frames:** `typing.Optional[int]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.qr_code()
-
-```
-
-
+**selected_model:** `typing.Optional[AnimateRequestSelectedModel]`
+
-#### ⚙️ Parameters
-
-
+**animation_mode:** `typing.Optional[str]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**zoom:** `typing.Optional[str]`
@@ -95,53 +104,63 @@ client.qr_code()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**translation_x:** `typing.Optional[str]`
-
-
+
+-
+**translation_y:** `typing.Optional[str]`
+
-
-client.seo_people_also_ask(...)
-
-#### 🔌 Usage
+**rotation3d_x:** `typing.Optional[str]`
+
+
+
-
+**rotation3d_y:** `typing.Optional[str]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.seo_people_also_ask()
-
-```
+**rotation3d_z:** `typing.Optional[str]`
+
+
+
+-
+
+**fps:** `typing.Optional[int]`
+
-#### ⚙️ Parameters
-
-
+**seed:** `typing.Optional[int]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**settings:** `typing.Optional[RunSettings]`
@@ -161,7 +180,7 @@ client.seo_people_also_ask()
-client.seo_content(...)
+client.qr_code(...)
-
@@ -179,7 +198,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.seo_content()
+client.qr_code(
+ text_prompt="text_prompt",
+)
```
@@ -195,7 +216,7 @@ client.seo_content()
-
-**example_id:** `typing.Optional[str]`
+**text_prompt:** `str`
@@ -203,53 +224,55 @@ client.seo_content()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**example_id:** `typing.Optional[str]`
-
-
+
+-
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
-
-client.web_search_llm(...)
-
-#### 🔌 Usage
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
-
+**qr_code_data:** `typing.Optional[str]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.web_search_llm()
-
-```
-
-
+**qr_code_input_image:** `typing.Optional[str]`
+
-#### ⚙️ Parameters
-
-
+**qr_code_vcard:** `typing.Optional[Vcard]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**qr_code_file:** `typing.Optional[str]`
@@ -257,53 +280,55 @@ client.web_search_llm()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**use_url_shortener:** `typing.Optional[bool]`
-
-
+
+-
+**negative_prompt:** `typing.Optional[str]`
+
-
-client.personalize_email(...)
-
-#### 🔌 Usage
+**image_prompt:** `typing.Optional[str]`
+
+
+
-
+**image_prompt_controlnet_models:** `typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.personalize_email()
-
-```
-
-
+**image_prompt_strength:** `typing.Optional[float]`
+
-#### ⚙️ Parameters
-
-
+**image_prompt_scale:** `typing.Optional[float]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**image_prompt_pos_x:** `typing.Optional[float]`
@@ -311,53 +336,55 @@ client.personalize_email()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**image_prompt_pos_y:** `typing.Optional[float]`
-
-
+
+-
+**selected_model:** `typing.Optional[QrCodeRequestSelectedModel]`
+
-
-client.bulk_run(...)
-
-#### 🔌 Usage
+**selected_controlnet_model:** `typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]`
+
+
+
-
+**output_width:** `typing.Optional[int]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.bulk_run()
-
-```
-
-
+**output_height:** `typing.Optional[int]`
+
-#### ⚙️ Parameters
-
-
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]`
@@ -365,53 +392,63 @@ client.bulk_run()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**num_outputs:** `typing.Optional[int]`
-
-
+
+-
+**quality:** `typing.Optional[int]`
+
-
-client.synthesize_data(...)
-
-#### 🔌 Usage
+**scheduler:** `typing.Optional[QrCodeRequestScheduler]`
+
+
+
-
+**seed:** `typing.Optional[int]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.synthesize_data()
-
-```
+**obj_scale:** `typing.Optional[float]`
+
+
+
+-
+
+**obj_pos_x:** `typing.Optional[float]`
+
-#### ⚙️ Parameters
-
-
+**obj_pos_y:** `typing.Optional[float]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**settings:** `typing.Optional[RunSettings]`
@@ -431,7 +468,7 @@ client.synthesize_data()
-client.llm(...)
+client.seo_people_also_ask(...)
-
@@ -449,7 +486,10 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.llm()
+client.seo_people_also_ask(
+ search_query="search_query",
+ site_filter="site_filter",
+)
```
@@ -465,7 +505,7 @@ client.llm()
-
-**example_id:** `typing.Optional[str]`
+**search_query:** `str`
@@ -473,53 +513,63 @@ client.llm()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**site_filter:** `str`
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+-
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
-
-client.rag(...)
-
-#### 🔌 Usage
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
-
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
-
-```python
-from gooey import Gooey
+**query_instructions:** `typing.Optional[str]`
+
+
+
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.rag()
+
+-
-```
-
-
+**selected_model:** `typing.Optional[SeoPeopleAlsoAskRequestSelectedModel]`
+
-#### ⚙️ Parameters
-
-
--
-
-
-**example_id:** `typing.Optional[str]`
+**max_search_urls:** `typing.Optional[int]`
@@ -527,53 +577,60 @@ client.rag()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**max_references:** `typing.Optional[int]`
-
-
+
+-
+**max_context_words:** `typing.Optional[int]`
+
-
-client.doc_summary(...)
-
-#### 🔌 Usage
+**scroll_jump:** `typing.Optional[int]`
+
+
+
-
+**embedding_model:** `typing.Optional[SeoPeopleAlsoAskRequestEmbeddingModel]`
+
+
+
+
-
-```python
-from gooey import Gooey
+**dense_weight:** `typing.Optional[float]`
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.doc_summary()
-```
-
-
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
-#### ⚙️ Parameters
-
-
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**num_outputs:** `typing.Optional[int]`
@@ -581,53 +638,71 @@ client.doc_summary()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**quality:** `typing.Optional[float]`
-
-
+
+-
+**max_tokens:** `typing.Optional[int]`
+
-
-client.lipsync_tts(...)
-
-#### 🔌 Usage
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
-
+**response_format_type:** `typing.Optional[SeoPeopleAlsoAskRequestResponseFormatType]`
+
+
+
+
-
-```python
-from gooey import Gooey
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.lipsync_tts()
+
+-
-```
+**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
-#### ⚙️ Parameters
-
-
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**settings:** `typing.Optional[RunSettings]`
@@ -647,7 +722,7 @@ client.lipsync_tts()
-client.text_to_speech(...)
+client.seo_content(...)
-
@@ -665,7 +740,12 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.text_to_speech()
+client.seo_content(
+ search_query="search_query",
+ keywords="keywords",
+ title="title",
+ company_url="company_url",
+)
```
@@ -681,7 +761,7 @@ client.text_to_speech()
-
-**example_id:** `typing.Optional[str]`
+**search_query:** `str`
@@ -689,53 +769,55 @@ client.text_to_speech()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**keywords:** `str`
-
-
+
+-
+**title:** `str`
+
-
-client.speech_recognition(...)
-
-#### 🔌 Usage
+**company_url:** `str`
+
+
+
-
+**example_id:** `typing.Optional[str]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.speech_recognition()
-
-```
-
-
+**task_instructions:** `typing.Optional[str]`
+
-#### ⚙️ Parameters
-
-
+**enable_html:** `typing.Optional[bool]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**selected_model:** `typing.Optional[SeoContentRequestSelectedModel]`
@@ -743,53 +825,55 @@ client.speech_recognition()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**max_search_urls:** `typing.Optional[int]`
-
-
+
+-
+**enable_crosslinks:** `typing.Optional[bool]`
+
-
-client.text_to_music(...)
-
-#### 🔌 Usage
+**seed:** `typing.Optional[int]`
+
+
+
-
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.text_to_music()
-
-```
-
-
+**num_outputs:** `typing.Optional[int]`
+
-#### ⚙️ Parameters
-
-
+**quality:** `typing.Optional[float]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**max_tokens:** `typing.Optional[int]`
@@ -797,53 +881,55 @@ client.text_to_music()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**sampling_temperature:** `typing.Optional[float]`
-
-
+
+-
+**response_format_type:** `typing.Optional[SeoContentRequestResponseFormatType]`
+
-
-client.translate(...)
-
-#### 🔌 Usage
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
-
+**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.translate()
-
-```
-
-
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
-#### ⚙️ Parameters
-
-
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**settings:** `typing.Optional[RunSettings]`
@@ -863,7 +949,7 @@ client.translate()
-client.remix_image(...)
+client.web_search_llm(...)
-
@@ -881,7 +967,10 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.remix_image()
+client.web_search_llm(
+ search_query="search_query",
+ site_filter="site_filter",
+)
```
@@ -897,7 +986,7 @@ client.remix_image()
-
-**example_id:** `typing.Optional[str]`
+**search_query:** `str`
@@ -905,53 +994,55 @@ client.remix_image()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**site_filter:** `str`
-
-
+
+-
+**example_id:** `typing.Optional[str]`
+
-
-client.text_to_image(...)
-
-#### 🔌 Usage
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
-
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.text_to_image()
-
-```
-
-
+**task_instructions:** `typing.Optional[str]`
+
-#### ⚙️ Parameters
-
-
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**selected_model:** `typing.Optional[WebSearchLlmRequestSelectedModel]`
@@ -959,53 +1050,68 @@ client.text_to_image()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**max_search_urls:** `typing.Optional[int]`
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+-
+**max_context_words:** `typing.Optional[int]`
+
-
-client.product_image(...)
-
-#### 🔌 Usage
+**scroll_jump:** `typing.Optional[int]`
+
+
+
-
+**embedding_model:** `typing.Optional[WebSearchLlmRequestEmbeddingModel]`
+
+
+
+
-
-```python
-from gooey import Gooey
+**dense_weight:** `typing.Optional[float]`
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.product_image()
-```
-
-
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
-#### ⚙️ Parameters
-
-
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**num_outputs:** `typing.Optional[int]`
@@ -1013,53 +1119,71 @@ client.product_image()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**quality:** `typing.Optional[float]`
-
-
+
+-
+**max_tokens:** `typing.Optional[int]`
+
-
-client.portrait(...)
-
-#### 🔌 Usage
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
-
+**response_format_type:** `typing.Optional[WebSearchLlmRequestResponseFormatType]`
+
+
+
+
-
-```python
-from gooey import Gooey
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.portrait()
+
+-
-```
+**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
-#### ⚙️ Parameters
-
-
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**settings:** `typing.Optional[RunSettings]`
@@ -1079,7 +1203,7 @@ client.portrait()
-client.image_from_email(...)
+client.personalize_email(...)
-
@@ -1097,7 +1221,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.image_from_email()
+client.personalize_email(
+ email_address="email_address",
+)
```
@@ -1113,7 +1239,7 @@ client.image_from_email()
-
-**example_id:** `typing.Optional[str]`
+**email_address:** `str`
@@ -1121,53 +1247,39 @@ client.image_from_email()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**example_id:** `typing.Optional[str]`
-
-
+
+-
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
-
-client.image_from_web_search(...)
-
-#### 🔌 Usage
-
-
--
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.image_from_web_search()
-
-```
-
-
+**input_prompt:** `typing.Optional[str]`
+
-#### ⚙️ Parameters
-
-
--
-
-
-**example_id:** `typing.Optional[str]`
+**selected_model:** `typing.Optional[PersonalizeEmailRequestSelectedModel]`
@@ -1175,53 +1287,55 @@ client.image_from_web_search()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**avoid_repetition:** `typing.Optional[bool]`
-
-
+
+-
+**num_outputs:** `typing.Optional[int]`
+
-
-client.remove_background(...)
-
-#### 🔌 Usage
+**quality:** `typing.Optional[float]`
+
+
+
-
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.remove_background()
-
-```
-
-
+**sampling_temperature:** `typing.Optional[float]`
+
-#### ⚙️ Parameters
-
-
+**response_format_type:** `typing.Optional[PersonalizeEmailRequestResponseFormatType]`
+
+
+
+
-
-**example_id:** `typing.Optional[str]`
+**settings:** `typing.Optional[RunSettings]`
@@ -1241,7 +1355,7 @@ client.remove_background()
-client.upscale(...)
+client.bulk_run(...)
-
@@ -1259,7 +1373,12 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.upscale()
+client.bulk_run(
+ documents=["documents"],
+ run_urls=["run_urls"],
+ input_columns={"key": "value"},
+ output_columns={"key": "value"},
+)
```
@@ -1275,7 +1394,13 @@ client.upscale()
-
-**example_id:** `typing.Optional[str]`
+**documents:** `typing.List[str]`
+
+
+Upload or link to a CSV or google sheet that contains your sample input data.
+For example, for Copilot, this would be sample questions or for Art QR Code, this would be pairs of image descriptions and URLs.
+Remember to include header names in your CSV too.
+
@@ -1283,49 +1408,40 @@ client.upscale()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
+**run_urls:** `typing.List[str]`
+Provide one or more Gooey.AI workflow runs.
+You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+
+
-
-client.embed(...)
-
-#### 🔌 Usage
+**input_columns:** `typing.Dict[str, str]`
-
--
+
+For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+
+
+
+
-
-```python
-from gooey import Gooey
+**output_columns:** `typing.Dict[str, str]`
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.embed()
-```
-
-
+For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+
+
-#### ⚙️ Parameters
-
-
--
-
-
@@ -1337,53 +1453,35 @@ client.embed()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
-
-
-
-
-
-
-
-client.seo_people_also_ask_doc(...)
-
-#### 🔌 Usage
-
-
--
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
-
-```python
-from gooey import Gooey
+**eval_urls:** `typing.Optional[typing.List[str]]`
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.seo_people_also_ask_doc()
-```
-
-
+_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+
+
-#### ⚙️ Parameters
-
-
--
-
-
-**example_id:** `typing.Optional[str]`
+**settings:** `typing.Optional[RunSettings]`
@@ -1403,7 +1501,7 @@ client.seo_people_also_ask_doc()
-client.health_status_get()
+client.synthesize_data(...)
-
@@ -1421,7 +1519,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.health_status_get()
+client.synthesize_data(
+ documents=["documents"],
+)
```
@@ -1437,95 +1537,142 @@ client.health_status_get()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**documents:** `typing.List[str]`
-
-
+
+-
+**example_id:** `typing.Optional[str]`
+
-
-client.post_v3chyron_plant_async()
-
-#### 🔌 Usage
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
-
-
--
-
-```python
-from gooey import Gooey
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3chyron_plant_async()
+
+-
-```
+**sheet_url:** `typing.Optional[str]`
+
+
+
+-
+
+**selected_asr_model:** `typing.Optional[SynthesizeDataRequestSelectedAsrModel]`
+
-#### ⚙️ Parameters
-
-
+**google_translate_target:** `typing.Optional[str]`
+
+
+
+
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**glossary_document:** `typing.Optional[str]`
+
+Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+-
+**selected_model:** `typing.Optional[SynthesizeDataRequestSelectedModel]`
+
-
-client.post_v3compare_llm_async()
-
-#### 🔌 Usage
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
-
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
-
-```python
-from gooey import Gooey
+**quality:** `typing.Optional[float]`
+
+
+
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3compare_llm_async()
+
+-
-```
+**max_tokens:** `typing.Optional[int]`
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
-#### ⚙️ Parameters
+
+-
+
+**response_format_type:** `typing.Optional[SynthesizeDataRequestResponseFormatType]`
+
+
+
-
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
-
@@ -1541,7 +1688,7 @@ client.post_v3compare_llm_async()
-client.post_v3compare_text2img_async()
+client.llm(...)
-
@@ -1559,7 +1706,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3compare_text2img_async()
+client.llm()
```
@@ -1575,49 +1722,99 @@ client.post_v3compare_text2img_async()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+-
+**input_prompt:** `typing.Optional[str]`
+
-
-client.post_v3deforum_sd_async()
-
-#### 🔌 Usage
+**selected_models:** `typing.Optional[typing.List[LlmRequestSelectedModelsItem]]`
+
+
+
-
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
-
-```python
-from gooey import Gooey
+**num_outputs:** `typing.Optional[int]`
+
+
+
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3deforum_sd_async()
+
+-
-```
+**quality:** `typing.Optional[float]`
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
-#### ⚙️ Parameters
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
-
+**response_format_type:** `typing.Optional[LlmRequestResponseFormatType]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
-
@@ -1633,7 +1830,7 @@ client.post_v3deforum_sd_async()
-client.post_v3email_face_inpainting_async()
+client.rag(...)
-
@@ -1651,7 +1848,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3email_face_inpainting_async()
+client.rag(
+ search_query="search_query",
+)
```
@@ -1667,233 +1866,5968 @@ client.post_v3email_face_inpainting_async()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**search_query:** `str`
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+-
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
-
-client.post_v3face_inpainting_async()
-
-#### 🔌 Usage
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
-
+**keyword_query:** `typing.Optional[RagRequestKeywordQuery]`
+
+
+
+
-
-```python
-from gooey import Gooey
+**documents:** `typing.Optional[typing.List[str]]`
+
+
+
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3face_inpainting_async()
+
+-
-```
+**max_references:** `typing.Optional[int]`
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
-#### ⚙️ Parameters
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
-
+**doc_extract_url:** `typing.Optional[str]`
+
+
+
+
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**embedding_model:** `typing.Optional[RagRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[RagRequestSelectedModel]`
+
+
+-
+
+**citation_style:** `typing.Optional[RagRequestCitationStyle]`
+
+
+-
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**response_format_type:** `typing.Optional[RagRequestResponseFormatType]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.doc_summary(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.doc_summary(
+ documents=["documents"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.List[str]`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**merge_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[DocSummaryRequestSelectedModel]`
+
+
+
+
+
+-
+
+**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]`
+
+
+
+
+
+-
+
+**selected_asr_model:** `typing.Optional[DocSummaryRequestSelectedAsrModel]`
+
+
+
+
+
+-
+
+**google_translate_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**response_format_type:** `typing.Optional[DocSummaryRequestResponseFormatType]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.lipsync_tts(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.lipsync_tts(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[LipsyncTtsRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_pitch:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+
+
+
+
+
+-
+
+**elevenlabs_api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_stability:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_style:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**openai_voice_name:** `typing.Optional[LipsyncTtsRequestOpenaiVoiceName]`
+
+
+
+
+
+-
+
+**openai_tts_model:** `typing.Optional[LipsyncTtsRequestOpenaiTtsModel]`
+
+
+
+
+
+-
+
+**input_face:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_padding_top:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_bottom:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_left:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_right:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[LipsyncTtsRequestSelectedModel]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.text_to_speech(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.text_to_speech(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[TextToSpeechRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_pitch:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+
+
+
+
+
+-
+
+**elevenlabs_api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_stability:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_style:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**openai_voice_name:** `typing.Optional[TextToSpeechRequestOpenaiVoiceName]`
+
+
+
+
+
+-
+
+**openai_tts_model:** `typing.Optional[TextToSpeechRequestOpenaiTtsModel]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.speech_recognition(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.speech_recognition(
+ documents=["documents"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.List[str]`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[SpeechRecognitionRequestSelectedModel]`
+
+
+
+
+
+-
+
+**language:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_model:** `typing.Optional[SpeechRecognitionRequestTranslationModel]`
+
+
+
+
+
+-
+
+**output_format:** `typing.Optional[SpeechRecognitionRequestOutputFormat]`
+
+
+
+
+
+-
+
+**google_translate_target:** `typing.Optional[str]` — Use `translation_model` & `translation_target` instead.
+
+
+
+
+
+-
+
+**translation_source:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**glossary_document:** `typing.Optional[str]`
+
+Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.text_to_music(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.text_to_music(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**duration_sec:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.List[typing.Literal["audio_ldm"]]]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.translate(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.translate()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**texts:** `typing.Optional[typing.List[str]]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[TranslateRequestSelectedModel]`
+
+
+
+
+
+-
+
+**translation_source:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**glossary_document:** `typing.Optional[str]`
+
+Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.remix_image(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.remix_image(
+ input_image="input_image",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**text_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[RemixImageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**selected_controlnet_model:** `typing.Optional[RemixImageRequestSelectedControlnetModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**prompt_strength:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**image_guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.text_to_image(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.text_to_image(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**dall_e3quality:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**dall_e3style:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.List[TextToImageRequestSelectedModelsItem]]`
+
+
+
+
+
+-
+
+**scheduler:** `typing.Optional[TextToImageRequestScheduler]`
+
+
+
+
+
+-
+
+**edit_instruction:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**image_guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.product_image(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.product_image(
+ input_image="input_image",
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**obj_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**mask_threshold:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[ProductImageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.portrait(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.portrait(
+ input_image="input_image",
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**face_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[PortraitRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**upscale_factor:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.image_from_email(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.image_from_email(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**email_address:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**twitter_handle:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[ImageFromEmailRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**upscale_factor:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**should_send_email:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**email_from:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_cc:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_bcc:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_subject:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_body:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_body_enable_html:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**fallback_email_body:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.image_from_web_search(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.image_from_web_search(
+ search_query="search_query",
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[ImageFromWebSearchRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**prompt_strength:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**image_guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.remove_background(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.remove_background(
+ input_image="input_image",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[RemoveBackgroundRequestSelectedModel]`
+
+
+
+
+
+-
+
+**mask_threshold:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**rect_persepective_transform:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**reflection_opacity:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.upscale(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.upscale(
+ scale=1,
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**scale:** `int` — The final upsampling scale of the image
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_image:** `typing.Optional[str]` — Input Image
+
+
+
+
+
+-
+
+**input_video:** `typing.Optional[str]` — Input Video
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]`
+
+
+
+
+
+-
+
+**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.embed(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.embed(
+ texts=["texts"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**texts:** `typing.List[str]`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[EmbedRequestSelectedModel]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.seo_people_also_ask_doc(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.seo_people_also_ask_doc(
+ search_query="search_query",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**keyword_query:** `typing.Optional[SeoPeopleAlsoAskDocRequestKeywordQuery]`
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.List[str]]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**doc_extract_url:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[SeoPeopleAlsoAskDocRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+
+Weighting for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[SeoPeopleAlsoAskDocRequestSelectedModel]`
+
+
+
+
+
+-
+
+**citation_style:** `typing.Optional[SeoPeopleAlsoAskDocRequestCitationStyle]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**response_format_type:** `typing.Optional[SeoPeopleAlsoAskDocRequestResponseFormatType]`
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
+
+
+
+
+-
+
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.health_status_get()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.health_status_get()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3chyron_plant_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3chyron_plant_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3compare_llm_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3compare_llm_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3compare_text2img_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3compare_text2img_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3deforum_sd_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3deforum_sd_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3email_face_inpainting_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3email_face_inpainting_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3face_inpainting_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3face_inpainting_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3google_image_gen_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3google_image_gen_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3image_segmentation_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3image_segmentation_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3img2img_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3img2img_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3letter_writer_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3letter_writer_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3lipsync_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3lipsync_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3lipsync_tts_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3lipsync_tts_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3object_inpainting_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3object_inpainting_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3seo_summary_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3seo_summary_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3smart_gpt_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3smart_gpt_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3social_lookup_email_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3social_lookup_email_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3text_to_speech_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3text_to_speech_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3art_qr_code_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3art_qr_code_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3asr_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3asr_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3bulk_eval_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3bulk_eval_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3bulk_runner_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3bulk_runner_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3compare_ai_upscalers_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3compare_ai_upscalers_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3doc_extract_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3doc_extract_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3doc_search_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3doc_search_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3doc_summary_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3doc_summary_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3embeddings_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3embeddings_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3functions_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3functions_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3google_gpt_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3google_gpt_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3related_qna_maker_doc_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3related_qna_maker_doc_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3related_qna_maker_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3related_qna_maker_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3text2audio_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3text2audio_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3translate_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3translate_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.post_v3video_bots_async()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.post_v3video_bots_async()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## CopilotIntegrations
+client.copilot_integrations.video_bots_stream_create(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.copilot_integrations.video_bots_stream_create(
+ integration_id="integration_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab
+
+
+
+
+
+-
+
+**conversation_id:** `typing.Optional[str]`
+
+The gooey conversation ID.
+
+If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests.
+
+Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response.
+
+
+
+
+
+-
+
+**user_id:** `typing.Optional[str]`
+
+Your app's custom user ID.
+
+If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation.
+
+
+
+
+
+-
+
+**user_message_id:** `typing.Optional[str]`
+
+Your app's custom message ID for the user message.
+
+If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation.
+
+
+
+
+
+-
+
+**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user.
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_audio:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_images:** `typing.Optional[typing.List[str]]`
+
+
+
+
+
+-
+
+**input_documents:** `typing.Optional[typing.List[str]]`
+
+
+
+
+
+-
+
+**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.List[ConversationEntry]]`
+
+
+
+
+
+-
+
+**bot_script:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[VideoBotsStreamCreateRequestSelectedModel]`
+
+
+
+
+
+-
+
+**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**keyword_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.List[str]]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[VideoBotsStreamCreateRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+
+-
+
+**citation_style:** `typing.Optional[VideoBotsStreamCreateRequestCitationStyle]`
+
+
+
+
+
+-
+
+**use_url_shortener:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**asr_model:** `typing.Optional[VideoBotsStreamCreateRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+
+
+
+
+
+-
+
+**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
+
+
+
+
+
+-
+
+**translation_model:** `typing.Optional[VideoBotsStreamCreateRequestTranslationModel]`
+
+
+
+
+
+-
+
+**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+
+
+
+
+-
+
+**input_glossary_document:** `typing.Optional[str]`
+
+
+Translation Glossary for User Language -> LLM Language (English)
+
+
+
+
+
+
+-
+
+**output_glossary_document:** `typing.Optional[str]`
+
+
+Translation Glossary for LLM Language (English) -> User Language
+
+
+
+
+
+
+-
+
+**lipsync_model:** `typing.Optional[VideoBotsStreamCreateRequestLipsyncModel]`
+
+
+
+
+
+-
+
+**tools:** `typing.Optional[typing.List[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**response_format_type:** `typing.Optional[VideoBotsStreamCreateRequestResponseFormatType]`
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[VideoBotsStreamCreateRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_pitch:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+
+
+
+
+
+-
+
+**elevenlabs_api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_stability:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_style:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**openai_voice_name:** `typing.Optional[VideoBotsStreamCreateRequestOpenaiVoiceName]`
+
+
+
+
+
+-
+
+**openai_tts_model:** `typing.Optional[VideoBotsStreamCreateRequestOpenaiTtsModel]`
+
+
+
+
+
+-
+
+**input_face:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_padding_top:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_bottom:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_left:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_right:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+
+
+
+
+
+-
+
+**input_text:** `typing.Optional[str]` — Use `input_prompt` instead
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.copilot_integrations.video_bots_stream(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.copilot_integrations.video_bots_stream(
+ request_id="request_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## CopilotForYourEnterprise
+client.copilot_for_your_enterprise.async_form_video_bots(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.copilot_for_your_enterprise.async_form_video_bots()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_audio:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_images:** `typing.Optional[typing.List[str]]`
+
+
+
+
+
+-
+
+**input_documents:** `typing.Optional[typing.List[str]]`
+
+
+
+
+
+-
+
+**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.List[ConversationEntry]]`
+
+
+
+
+
+-
+
+**bot_script:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[AsyncFormVideoBotsRequestSelectedModel]`
+
+
+
+
+
+-
+
+**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**keyword_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.List[str]]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[AsyncFormVideoBotsRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+
+-
+
+**citation_style:** `typing.Optional[AsyncFormVideoBotsRequestCitationStyle]`
+
+
+
+
+
+-
+
+**use_url_shortener:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**asr_model:** `typing.Optional[AsyncFormVideoBotsRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+
+
+
+
+
+-
+
+**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
+
+
+
+
+
+-
+
+**translation_model:** `typing.Optional[AsyncFormVideoBotsRequestTranslationModel]`
+
+
+
+
+
+-
+
+**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+
+
+
+
+-
+
+**input_glossary_document:** `typing.Optional[str]`
+
+
+Translation Glossary for User Language -> LLM Language (English)
+
+
+
+
+
+
+-
+
+**output_glossary_document:** `typing.Optional[str]`
+
+
+Translation Glossary for LLM Language (English) -> User Language
+
+
+
+
+
+
+-
+
+**lipsync_model:** `typing.Optional[AsyncFormVideoBotsRequestLipsyncModel]`
+
+
+
+
+
+-
+
+**tools:** `typing.Optional[typing.List[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**response_format_type:** `typing.Optional[AsyncFormVideoBotsRequestResponseFormatType]`
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[AsyncFormVideoBotsRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
-
-client.post_v3google_image_gen_async()
-
-#### 🔌 Usage
+**google_voice_name:** `typing.Optional[str]`
+
+
+
-
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3google_image_gen_async()
-
-```
-
-
+**google_pitch:** `typing.Optional[float]`
+
-#### ⚙️ Parameters
-
-
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
-
-
+
+-
+**elevenlabs_api_key:** `typing.Optional[str]`
+
-
-client.post_v3image_segmentation_async()
-
-#### 🔌 Usage
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
-
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3image_segmentation_async()
-
-```
-
-
+**elevenlabs_stability:** `typing.Optional[float]`
+
-#### ⚙️ Parameters
-
-
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**elevenlabs_style:** `typing.Optional[float]`
-
-
+
+-
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
-
-client.post_v3img2img_async()
-
-#### 🔌 Usage
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
-
+**openai_voice_name:** `typing.Optional[AsyncFormVideoBotsRequestOpenaiVoiceName]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3img2img_async()
-
-```
-
-
+**openai_tts_model:** `typing.Optional[AsyncFormVideoBotsRequestOpenaiTtsModel]`
+
-#### ⚙️ Parameters
-
-
+**input_face:** `typing.Optional[str]`
+
+
+
+
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**face_padding_top:** `typing.Optional[int]`
-
-
+
+-
+**face_padding_bottom:** `typing.Optional[int]`
+
-
-client.post_v3letter_writer_async()
-
-#### 🔌 Usage
+**face_padding_left:** `typing.Optional[int]`
+
+
+
-
+**face_padding_right:** `typing.Optional[int]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3letter_writer_async()
-
-```
-
-
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+
-#### ⚙️ Parameters
-
-
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
-
@@ -1909,7 +7843,7 @@ client.post_v3letter_writer_async()
-client.post_v3lipsync_async()
+client.copilot_for_your_enterprise.status_video_bots(...)
-
@@ -1927,7 +7861,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3lipsync_async()
+client.copilot_for_your_enterprise.status_video_bots(
+ run_id="run_id",
+)
```
@@ -1943,6 +7879,14 @@ client.post_v3lipsync_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -1955,7 +7899,8 @@ client.post_v3lipsync_async()
-client.post_v3lipsync_tts_async()
+## AiAnimationGenerator
+client.ai_animation_generator.status_deforum_sd(...)
-
@@ -1973,7 +7918,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3lipsync_tts_async()
+client.ai_animation_generator.status_deforum_sd(
+ run_id="run_id",
+)
```
@@ -1989,6 +7936,14 @@ client.post_v3lipsync_tts_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2001,7 +7956,8 @@ client.post_v3lipsync_tts_async()
-client.post_v3object_inpainting_async()
+## AiArtQrCode
+client.ai_art_qr_code.status_art_qr_code(...)
-
@@ -2019,7 +7975,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3object_inpainting_async()
+client.ai_art_qr_code.status_art_qr_code(
+ run_id="run_id",
+)
```
@@ -2035,6 +7993,14 @@ client.post_v3object_inpainting_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2047,7 +8013,8 @@ client.post_v3object_inpainting_async()
-client.post_v3seo_summary_async()
+## GeneratePeopleAlsoAskSeoContent
+client.generate_people_also_ask_seo_content.status_related_qna_maker(...)
-
@@ -2065,7 +8032,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3seo_summary_async()
+client.generate_people_also_ask_seo_content.status_related_qna_maker(
+ run_id="run_id",
+)
```
@@ -2081,6 +8050,14 @@ client.post_v3seo_summary_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2093,7 +8070,8 @@ client.post_v3seo_summary_async()
-client.post_v3smart_gpt_async()
+## CreateAPerfectSeoOptimizedTitleParagraph
+client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(...)
-
@@ -2111,7 +8089,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3smart_gpt_async()
+client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(
+ run_id="run_id",
+)
```
@@ -2127,6 +8107,14 @@ client.post_v3smart_gpt_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2139,7 +8127,8 @@ client.post_v3smart_gpt_async()
-client.post_v3social_lookup_email_async()
+## WebSearchGpt3
+client.web_search_gpt3.status_google_gpt(...)
-
@@ -2157,7 +8146,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3social_lookup_email_async()
+client.web_search_gpt3.status_google_gpt(
+ run_id="run_id",
+)
```
@@ -2173,6 +8164,14 @@ client.post_v3social_lookup_email_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2185,7 +8184,8 @@ client.post_v3social_lookup_email_async()
-client.post_v3text_to_speech_async()
+## ProfileLookupGpt3ForAiPersonalizedEmails
+client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(...)
-
@@ -2203,7 +8203,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3text_to_speech_async()
+client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(
+ run_id="run_id",
+)
```
@@ -2219,6 +8221,14 @@ client.post_v3text_to_speech_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2231,7 +8241,8 @@ client.post_v3text_to_speech_async()
-client.post_v3art_qr_code_async()
+## BulkRunner
+client.bulk_runner.status_bulk_runner(...)
-
@@ -2249,7 +8260,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3art_qr_code_async()
+client.bulk_runner.status_bulk_runner(
+ run_id="run_id",
+)
```
@@ -2265,6 +8278,14 @@ client.post_v3art_qr_code_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2277,7 +8298,8 @@ client.post_v3art_qr_code_async()
-client.post_v3asr_async()
+## Evaluator
+client.evaluator.async_form_bulk_eval(...)
-
@@ -2295,7 +8317,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3asr_async()
+client.evaluator.async_form_bulk_eval(
+ documents=["documents"],
+)
```
@@ -2311,95 +8335,130 @@ client.post_v3asr_async()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**documents:** `typing.List[str]`
+
+
+Upload or link to a CSV or Google Sheet that contains your sample input data.
+For example, for Copilot, this would be sample questions, or for Art QR Code, it would be pairs of image descriptions and URLs.
+Remember to include header names in your CSV too.
+
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+-
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+-
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
-
-client.post_v3bulk_eval_async()
-
-#### 🔌 Usage
+**eval_prompts:** `typing.Optional[typing.List[EvalPrompt]]`
-
--
+
+Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
+_The `columns` dictionary can be used to reference the spreadsheet columns._
+
+
+
+
-
-```python
-from gooey import Gooey
+**agg_functions:** `typing.Optional[typing.List[AggFunction]]`
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3bulk_eval_async()
-```
-
-
+Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+
+
-#### ⚙️ Parameters
-
-
+**selected_model:** `typing.Optional[AsyncFormBulkEvalRequestSelectedModel]`
+
+
+
+
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**avoid_repetition:** `typing.Optional[bool]`
-
-
+
+-
+**num_outputs:** `typing.Optional[int]`
+
-
-client.post_v3bulk_runner_async()
-
-#### 🔌 Usage
+**quality:** `typing.Optional[float]`
+
+
+
-
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3bulk_runner_async()
-
-```
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+-
+
+**response_format_type:** `typing.Optional[AsyncFormBulkEvalRequestResponseFormatType]`
+
-#### ⚙️ Parameters
-
-
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
-
@@ -2415,7 +8474,7 @@ client.post_v3bulk_runner_async()
-client.post_v3compare_ai_upscalers_async()
+client.evaluator.status_bulk_eval(...)
-
@@ -2433,7 +8492,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3compare_ai_upscalers_async()
+client.evaluator.status_bulk_eval(
+ run_id="run_id",
+)
```
@@ -2449,6 +8510,14 @@ client.post_v3compare_ai_upscalers_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2461,7 +8530,8 @@ client.post_v3compare_ai_upscalers_async()
-client.post_v3doc_extract_async()
+## SyntheticDataMakerForVideosPdFs
+client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(...)
-
@@ -2479,7 +8549,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3doc_extract_async()
+client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(
+ run_id="run_id",
+)
```
@@ -2495,6 +8567,14 @@ client.post_v3doc_extract_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2507,7 +8587,8 @@ client.post_v3doc_extract_async()
-client.post_v3doc_search_async()
+## LargeLanguageModelsGpt3
+client.large_language_models_gpt3.status_compare_llm(...)
-
@@ -2525,7 +8606,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3doc_search_async()
+client.large_language_models_gpt3.status_compare_llm(
+ run_id="run_id",
+)
```
@@ -2541,6 +8624,14 @@ client.post_v3doc_search_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2553,7 +8644,8 @@ client.post_v3doc_search_async()
-client.post_v3doc_summary_async()
+## SearchYourDocsWithGpt
+client.search_your_docs_with_gpt.status_doc_search(...)
-
@@ -2571,7 +8663,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3doc_summary_async()
+client.search_your_docs_with_gpt.status_doc_search(
+ run_id="run_id",
+)
```
@@ -2587,6 +8681,14 @@ client.post_v3doc_summary_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2599,7 +8701,8 @@ client.post_v3doc_summary_async()
-client.post_v3embeddings_async()
+## SmartGpt
+client.smart_gpt.async_form_smart_gpt(...)
-
@@ -2617,7 +8720,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3embeddings_async()
+client.smart_gpt.async_form_smart_gpt(
+ input_prompt="input_prompt",
+)
```
@@ -2633,95 +8738,123 @@ client.post_v3embeddings_async()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**input_prompt:** `str`
-
-
+
+-
+**example_id:** `typing.Optional[str]`
+
-
-client.post_v3functions_async()
-
-#### 🔌 Usage
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+
+
+
-
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3functions_async()
-
-```
-
-
+**cot_prompt:** `typing.Optional[str]`
+
-#### ⚙️ Parameters
-
-
+**reflexion_prompt:** `typing.Optional[str]`
+
+
+
+
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**dera_prompt:** `typing.Optional[str]`
+
+
+-
+
+**selected_model:** `typing.Optional[AsyncFormSmartGptRequestSelectedModel]`
+
+
+-
+**avoid_repetition:** `typing.Optional[bool]`
+
-
-client.post_v3google_gpt_async()
-
-#### 🔌 Usage
+**num_outputs:** `typing.Optional[int]`
+
+
+
-
+**quality:** `typing.Optional[float]`
+
+
+
+
-
-```python
-from gooey import Gooey
+**max_tokens:** `typing.Optional[int]`
+
+
+
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3google_gpt_async()
+
+-
-```
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+-
+
+**response_format_type:** `typing.Optional[AsyncFormSmartGptRequestResponseFormatType]`
+
-#### ⚙️ Parameters
-
-
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
-
@@ -2737,7 +8870,7 @@ client.post_v3google_gpt_async()
-client.post_v3related_qna_maker_doc_async()
+client.smart_gpt.status_smart_gpt(...)
-
@@ -2755,7 +8888,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3related_qna_maker_doc_async()
+client.smart_gpt.status_smart_gpt(
+ run_id="run_id",
+)
```
@@ -2771,6 +8906,14 @@ client.post_v3related_qna_maker_doc_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2783,7 +8926,8 @@ client.post_v3related_qna_maker_doc_async()
-client.post_v3related_qna_maker_async()
+## SummarizeYourDocsWithGpt
+client.summarize_your_docs_with_gpt.status_doc_summary(...)
-
@@ -2801,7 +8945,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3related_qna_maker_async()
+client.summarize_your_docs_with_gpt.status_doc_summary(
+ run_id="run_id",
+)
```
@@ -2817,6 +8963,14 @@ client.post_v3related_qna_maker_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2829,7 +8983,8 @@ client.post_v3related_qna_maker_async()
-client.post_v3text2audio_async()
+## Functions
+client.functions.async_form_functions(...)
-
@@ -2847,7 +9002,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3text2audio_async()
+client.functions.async_form_functions()
```
@@ -2863,49 +9018,35 @@ client.post_v3text2audio_async()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**example_id:** `typing.Optional[str]`
-
-
-
-
-
-
-
-client.post_v3translate_async()
-
-#### 🔌 Usage
-
-
--
+**code:** `typing.Optional[str]` — The JS code to be executed.
+
+
+
-
-```python
-from gooey import Gooey
-
-client = Gooey(
- api_key="YOUR_API_KEY",
-)
-client.post_v3translate_async()
-
-```
-
-
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used in the code
+
-#### ⚙️ Parameters
-
-
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
-
@@ -2921,7 +9062,7 @@ client.post_v3translate_async()
-client.post_v3video_bots_async()
+client.functions.status_functions(...)
-
@@ -2939,7 +9080,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.post_v3video_bots_async()
+client.functions.status_functions(
+ run_id="run_id",
+)
```
@@ -2955,6 +9098,14 @@ client.post_v3video_bots_async()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2967,8 +9118,8 @@ client.post_v3video_bots_async()
-## CopilotIntegrations
-client.copilot_integrations.video_bots_stream_create(...)
+## LipSyncing
+client.lip_syncing.async_form_lipsync(...)
-
@@ -2986,9 +9137,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.copilot_integrations.video_bots_stream_create(
- integration_id="integration_id",
-)
+client.lip_syncing.async_form_lipsync()
```
@@ -3004,7 +9153,7 @@ client.copilot_integrations.video_bots_stream_create(
-
-**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab
+**example_id:** `typing.Optional[str]`
@@ -3012,13 +9161,7 @@ client.copilot_integrations.video_bots_stream_create(
-
-**conversation_id:** `typing.Optional[str]`
-
-The gooey conversation ID.
-
-If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests.
-
-Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response.
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -3026,11 +9169,7 @@ Note that you may not provide a custom ID here, and must only use the `conversat
-
-**user_id:** `typing.Optional[str]`
-
-Your app's custom user ID.
-
-If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation.
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3038,11 +9177,7 @@ If not provided, a random user will be created and a new ID will be returned in
-
-**user_message_id:** `typing.Optional[str]`
-
-Your app's custom message ID for the user message.
-
-If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation.
+**input_face:** `typing.Optional[str]`
@@ -3050,7 +9185,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-
-**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user.
+**face_padding_top:** `typing.Optional[int]`
@@ -3058,7 +9193,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**face_padding_bottom:** `typing.Optional[int]`
@@ -3066,7 +9201,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+**face_padding_left:** `typing.Optional[int]`
@@ -3074,7 +9209,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-
-**input_prompt:** `typing.Optional[str]`
+**face_padding_right:** `typing.Optional[int]`
@@ -3082,7 +9217,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-
-**input_audio:** `typing.Optional[str]`
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
@@ -3090,7 +9225,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-
-**input_images:** `typing.Optional[typing.Sequence[str]]`
+**selected_model:** `typing.Optional[AsyncFormLipsyncRequestSelectedModel]`
@@ -3098,7 +9233,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-
-**input_documents:** `typing.Optional[typing.Sequence[str]]`
+**input_audio:** `typing.Optional[str]`
@@ -3106,7 +9241,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-
-**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
+**settings:** `typing.Optional[RunSettings]`
@@ -3114,63 +9249,55 @@ If not provided, a random ID will be generated and returned in the response. Thi
-
-**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**bot_script:** `typing.Optional[str]`
-
-
--
-**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]`
-
+
+client.lip_syncing.status_lipsync(...)
-
-**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
-
-
-
+#### 🔌 Usage
-
-**task_instructions:** `typing.Optional[str]`
-
-
-
-
-
-**query_instructions:** `typing.Optional[str]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.lip_syncing.status_lipsync(
+ run_id="run_id",
+)
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**keyword_instructions:** `typing.Optional[str]`
-
-
-
-
-
-**documents:** `typing.Optional[typing.Sequence[str]]`
+**run_id:** `str`
@@ -3178,68 +9305,56 @@ If not provided, a random ID will be generated and returned in the response. Thi
-
-**max_references:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**max_context_words:** `typing.Optional[int]`
-
+
+## LipsyncVideoWithAnyText
+client.lipsync_video_with_any_text.status_lipsync_tts(...)
-
-**scroll_jump:** `typing.Optional[int]`
-
-
-
+#### 🔌 Usage
-
-**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]`
-
-
-
-
-
-**dense_weight:** `typing.Optional[float]`
+```python
+from gooey import Gooey
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.lipsync_video_with_any_text.status_lipsync_tts(
+ run_id="run_id",
+)
-Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
-Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
-
-
+```
-
-
--
-
-**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]`
-
+#### ⚙️ Parameters
+
-
-**use_url_shortener:** `typing.Optional[bool]`
-
-
-
-
-
-**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+**run_id:** `str`
@@ -3247,71 +9362,56 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]`
-
-
--
-**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
-
+
+## CompareAiVoiceGenerators
+client.compare_ai_voice_generators.status_text_to_speech(...)
-
-**input_glossary_document:** `typing.Optional[str]`
-
+#### 🔌 Usage
-Translation Glossary for User Langauge -> LLM Language (English)
-
-
-
-
+
+-
-
-**output_glossary_document:** `typing.Optional[str]`
+```python
+from gooey import Gooey
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.compare_ai_voice_generators.status_text_to_speech(
+ run_id="run_id",
+)
-Translation Glossary for LLM Language (English) -> User Langauge
-
-
+```
-
-
--
-
-**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]`
-
+#### ⚙️ Parameters
+
-
-**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
-
-
-
-
-
-**avoid_repetition:** `typing.Optional[bool]`
+**run_id:** `str`
@@ -3319,71 +9419,56 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**num_outputs:** `typing.Optional[int]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**quality:** `typing.Optional[float]`
-
-
--
-**max_tokens:** `typing.Optional[int]`
-
+
+## SpeechRecognitionTranslation
+client.speech_recognition_translation.status_asr(...)
-
-**sampling_temperature:** `typing.Optional[float]`
-
-
-
+#### 🔌 Usage
-
-**response_format_type:** `typing.Optional[CreateStreamRequestResponseFormatType]`
-
-
-
-
-
-**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]`
-
-
-
+```python
+from gooey import Gooey
-
--
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.speech_recognition_translation.status_asr(
+ run_id="run_id",
+)
-**uberduck_voice_name:** `typing.Optional[str]`
-
+```
+
+
+
+#### ⚙️ Parameters
-
-**uberduck_speaking_rate:** `typing.Optional[float]`
-
-
-
-
-
-**google_voice_name:** `typing.Optional[str]`
+**run_id:** `str`
@@ -3391,55 +9476,56 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**google_speaking_rate:** `typing.Optional[float]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**google_pitch:** `typing.Optional[float]`
-
+
+## TextGuidedAudioGenerator
+client.text_guided_audio_generator.status_text2audio(...)
-
-**bark_history_prompt:** `typing.Optional[str]`
-
-
-
+#### 🔌 Usage
-
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
-
-
-
-
-
-**elevenlabs_api_key:** `typing.Optional[str]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.text_guided_audio_generator.status_text2audio(
+ run_id="run_id",
+)
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**elevenlabs_voice_id:** `typing.Optional[str]`
-
-
-
-
-
-**elevenlabs_model:** `typing.Optional[str]`
+**run_id:** `str`
@@ -3447,55 +9533,56 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**elevenlabs_stability:** `typing.Optional[float]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**elevenlabs_similarity_boost:** `typing.Optional[float]`
-
+
+## CompareAiTranslations
+client.compare_ai_translations.status_translate(...)
-
-**elevenlabs_style:** `typing.Optional[float]`
-
-
-
+#### 🔌 Usage
-
-**elevenlabs_speaker_boost:** `typing.Optional[bool]`
-
-
-
-
-
-**azure_voice_name:** `typing.Optional[str]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.compare_ai_translations.status_translate(
+ run_id="run_id",
+)
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]`
-
-
-
-
-
-**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]`
+**run_id:** `str`
@@ -3503,55 +9590,56 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**input_face:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**face_padding_top:** `typing.Optional[int]`
-
+
+## EditAnImageWithAiPrompt
+client.edit_an_image_with_ai_prompt.status_img2img(...)
-
-**face_padding_bottom:** `typing.Optional[int]`
-
-
-
+#### 🔌 Usage
-
-**face_padding_left:** `typing.Optional[int]`
-
-
-
-
-
-**face_padding_right:** `typing.Optional[int]`
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.edit_an_image_with_ai_prompt.status_img2img(
+ run_id="run_id",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
-
-
-
-
-
-**input_text:** `typing.Optional[str]` — Use `input_prompt` instead
+**run_id:** `str`
@@ -3571,7 +9659,8 @@ Translation Glossary for LLM Language (English) -> User Langauge
-client.copilot_integrations.video_bots_stream(...)
+## CompareAiImageGenerators
+client.compare_ai_image_generators.status_compare_text2img(...)
-
@@ -3589,8 +9678,8 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.copilot_integrations.video_bots_stream(
- request_id="request_id",
+client.compare_ai_image_generators.status_compare_text2img(
+ run_id="run_id",
)
```
@@ -3607,7 +9696,7 @@ client.copilot_integrations.video_bots_stream(
-
-**request_id:** `str`
+**run_id:** `str`
@@ -3627,8 +9716,8 @@ client.copilot_integrations.video_bots_stream(
-## CopilotForYourEnterprise
-client.copilot_for_your_enterprise.async_form_video_bots(...)
+## GenerateProductPhotoBackgrounds
+client.generate_product_photo_backgrounds.status_object_inpainting(...)
-
@@ -3646,7 +9735,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.copilot_for_your_enterprise.async_form_video_bots()
+client.generate_product_photo_backgrounds.status_object_inpainting(
+ run_id="run_id",
+)
```
@@ -3662,7 +9753,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-
-**example_id:** `typing.Optional[str]`
+**run_id:** `str`
@@ -3682,8 +9773,8 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-## Evaluator
-client.evaluator.async_form_bulk_eval(...)
+## AiImageWithAFace
+client.ai_image_with_a_face.status_face_inpainting(...)
-
@@ -3701,7 +9792,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.evaluator.async_form_bulk_eval()
+client.ai_image_with_a_face.status_face_inpainting(
+ run_id="run_id",
+)
```
@@ -3717,7 +9810,7 @@ client.evaluator.async_form_bulk_eval()
-
-**example_id:** `typing.Optional[str]`
+**run_id:** `str`
@@ -3737,8 +9830,8 @@ client.evaluator.async_form_bulk_eval()
-## SmartGpt
-client.smart_gpt.async_form_smart_gpt(...)
+## AiGeneratedPhotoFromEmailProfileLookup
+client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting(...)
-
@@ -3756,7 +9849,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.smart_gpt.async_form_smart_gpt()
+client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting(
+ run_id="run_id",
+)
```
@@ -3772,7 +9867,7 @@ client.smart_gpt.async_form_smart_gpt()
-
-**example_id:** `typing.Optional[str]`
+**run_id:** `str`
@@ -3792,7 +9887,8 @@ client.smart_gpt.async_form_smart_gpt()
-client.smart_gpt.post()
+## RenderImageSearchResultsWithAi
+client.render_image_search_results_with_ai.status_google_image_gen(...)
-
@@ -3810,7 +9906,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.smart_gpt.post()
+client.render_image_search_results_with_ai.status_google_image_gen(
+ run_id="run_id",
+)
```
@@ -3826,6 +9924,14 @@ client.smart_gpt.post()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3838,8 +9944,8 @@ client.smart_gpt.post()
-## Functions
-client.functions.async_form_functions(...)
+## AiBackgroundChanger
+client.ai_background_changer.status_image_segmentation(...)
-
@@ -3857,7 +9963,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.functions.async_form_functions()
+client.ai_background_changer.status_image_segmentation(
+ run_id="run_id",
+)
```
@@ -3873,7 +9981,7 @@ client.functions.async_form_functions()
-
-**example_id:** `typing.Optional[str]`
+**run_id:** `str`
@@ -3893,7 +10001,8 @@ client.functions.async_form_functions()
-client.functions.post()
+## CompareAiImageUpscalers
+client.compare_ai_image_upscalers.status_compare_ai_upscalers(...)
-
@@ -3911,7 +10020,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.functions.post()
+client.compare_ai_image_upscalers.status_compare_ai_upscalers(
+ run_id="run_id",
+)
```
@@ -3927,6 +10038,14 @@ client.functions.post()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3939,8 +10058,8 @@ client.functions.post()
-## LipSyncing
-client.lip_syncing.async_form_lipsync(...)
+## ChyronPlantBot
+client.chyron_plant_bot.status_chyron_plant(...)
-
@@ -3958,7 +10077,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.lip_syncing.async_form_lipsync()
+client.chyron_plant_bot.status_chyron_plant(
+ run_id="run_id",
+)
```
@@ -3974,7 +10095,7 @@ client.lip_syncing.async_form_lipsync()
-
-**example_id:** `typing.Optional[str]`
+**run_id:** `str`
@@ -3994,8 +10115,8 @@ client.lip_syncing.async_form_lipsync()
-## Misc
-client.misc.get_balance()
+## LetterWriter
+client.letter_writer.status_letter_writer(...)
-
@@ -4013,7 +10134,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.misc.get_balance()
+client.letter_writer.status_letter_writer(
+ run_id="run_id",
+)
```
@@ -4029,6 +10152,14 @@ client.misc.get_balance()
-
+**run_id:** `str`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -4041,7 +10172,8 @@ client.misc.get_balance()
-client.misc.video_bots_broadcast(...)
+## Embeddings
+client.embeddings.status_embeddings(...)
-
@@ -4059,8 +10191,8 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.misc.video_bots_broadcast(
- text="text",
+client.embeddings.status_embeddings(
+ run_id="run_id",
)
```
@@ -4077,7 +10209,7 @@ client.misc.video_bots_broadcast(
-
-**text:** `str` — Message to broadcast to all users
+**run_id:** `str`
@@ -4085,55 +10217,56 @@ client.misc.video_bots_broadcast(
-
-**example_id:** `typing.Optional[str]`
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**run_id:** `typing.Optional[str]`
-
+
+## PeopleAlsoAskAnswersFromADoc
+client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc(...)
-
-**audio:** `typing.Optional[str]` — Audio URL to send to all users
-
-
-
+#### 🔌 Usage
-
-**video:** `typing.Optional[str]` — Video URL to send to all users
-
-
-
-
-
-**documents:** `typing.Optional[typing.Sequence[str]]` — Video URL to send to all users
-
+```python
+from gooey import Gooey
+
+client = Gooey(
+ api_key="YOUR_API_KEY",
+)
+client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc(
+ run_id="run_id",
+)
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**buttons:** `typing.Optional[typing.Sequence[ReplyButton]]` — Buttons to send to all users
-
-
-
-
-
-**filters:** `typing.Optional[BotBroadcastFilters]` — Filters to select users to broadcast to. If not provided, will broadcast to all users of this bot.
+**run_id:** `str`
@@ -4153,8 +10286,8 @@ client.misc.video_bots_broadcast(
-## BulkRunner
-client.bulk_runner.post()
+## Misc
+client.misc.get_balance()
-
@@ -4172,7 +10305,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.bulk_runner.post()
+client.misc.get_balance()
```
@@ -4200,8 +10333,7 @@ client.bulk_runner.post()
-## Embeddings
-client.embeddings.post()
+client.misc.video_bots_broadcast(...)
-
@@ -4219,7 +10351,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.embeddings.post()
+client.misc.video_bots_broadcast(
+ text="text",
+)
```
@@ -4235,6 +10369,70 @@ client.embeddings.post()
-
+**text:** `str` — Message to broadcast to all users
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**audio:** `typing.Optional[str]` — Audio URL to send to all users
+
+
+
+
+
+-
+
+**video:** `typing.Optional[str]` — Video URL to send to all users
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.List[str]]` — Video URL to send to all users
+
+
+
+
+
+-
+
+**buttons:** `typing.Optional[typing.List[ReplyButton]]` — Buttons to send to all users
+
+
+
+
+
+-
+
+**filters:** `typing.Optional[BotBroadcastFilters]` — Filters to select users to broadcast to. If not provided, will broadcast to all users of this bot.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py
index 116880c..3dd8833 100644
--- a/src/gooey/__init__.py
+++ b/src/gooey/__init__.py
@@ -5,62 +5,19 @@
AggFunctionFunction,
AggFunctionResult,
AggFunctionResultFunction,
+ AnimateRequestSelectedModel,
AnimationPrompt,
AsrChunk,
AsrOutputJson,
AsrPageOutput,
AsrPageOutputOutputTextItem,
- AsrPageRequest,
- AsrPageRequestOutputFormat,
- AsrPageRequestSelectedModel,
- AsrPageRequestTranslationModel,
- AsrPageResponse,
AsrPageStatusResponse,
AsyncApiResponseModelV3,
BalanceResponse,
- BodyAsyncFormArtQrCode,
- BodyAsyncFormAsr,
- BodyAsyncFormBulkEval,
- BodyAsyncFormBulkRunner,
- BodyAsyncFormChyronPlant,
- BodyAsyncFormCompareAiUpscalers,
- BodyAsyncFormCompareLlm,
- BodyAsyncFormCompareText2Img,
- BodyAsyncFormDeforumSd,
- BodyAsyncFormDocExtract,
- BodyAsyncFormDocSearch,
- BodyAsyncFormDocSummary,
- BodyAsyncFormEmailFaceInpainting,
- BodyAsyncFormEmbeddings,
- BodyAsyncFormFaceInpainting,
- BodyAsyncFormFunctions,
- BodyAsyncFormGoogleGpt,
- BodyAsyncFormGoogleImageGen,
- BodyAsyncFormImageSegmentation,
- BodyAsyncFormImg2Img,
- BodyAsyncFormLetterWriter,
- BodyAsyncFormLipsync,
- BodyAsyncFormLipsyncTts,
- BodyAsyncFormObjectInpainting,
- BodyAsyncFormRelatedQnaMaker,
- BodyAsyncFormRelatedQnaMakerDoc,
- BodyAsyncFormSeoSummary,
- BodyAsyncFormSmartGpt,
- BodyAsyncFormSocialLookupEmail,
- BodyAsyncFormText2Audio,
- BodyAsyncFormTextToSpeech,
- BodyAsyncFormTranslate,
- BodyAsyncFormVideoBots,
BotBroadcastFilters,
BulkEvalPageOutput,
- BulkEvalPageRequest,
- BulkEvalPageRequestResponseFormatType,
- BulkEvalPageRequestSelectedModel,
- BulkEvalPageResponse,
BulkEvalPageStatusResponse,
BulkRunnerPageOutput,
- BulkRunnerPageRequest,
- BulkRunnerPageResponse,
BulkRunnerPageStatusResponse,
ButtonPressed,
CalledFunctionResponse,
@@ -69,24 +26,12 @@
ChatCompletionContentPartTextParam,
ChyronPlantPageOutput,
ChyronPlantPageRequest,
- ChyronPlantPageResponse,
ChyronPlantPageStatusResponse,
CompareLlmPageOutput,
- CompareLlmPageRequest,
- CompareLlmPageRequestResponseFormatType,
- CompareLlmPageRequestSelectedModelsItem,
- CompareLlmPageResponse,
CompareLlmPageStatusResponse,
CompareText2ImgPageOutput,
- CompareText2ImgPageRequest,
- CompareText2ImgPageRequestScheduler,
- CompareText2ImgPageRequestSelectedModelsItem,
- CompareText2ImgPageResponse,
CompareText2ImgPageStatusResponse,
CompareUpscalerPageOutput,
- CompareUpscalerPageRequest,
- CompareUpscalerPageRequestSelectedModelsItem,
- CompareUpscalerPageResponse,
CompareUpscalerPageStatusResponse,
ConsoleLogs,
ConsoleLogsLevel,
@@ -99,140 +44,91 @@
ConversationStart,
CreateStreamResponse,
DeforumSdPageOutput,
- DeforumSdPageRequest,
- DeforumSdPageRequestSelectedModel,
- DeforumSdPageResponse,
DeforumSdPageStatusResponse,
DocExtractPageOutput,
- DocExtractPageRequest,
- DocExtractPageRequestResponseFormatType,
- DocExtractPageRequestSelectedAsrModel,
- DocExtractPageRequestSelectedModel,
- DocExtractPageResponse,
DocExtractPageStatusResponse,
DocSearchPageOutput,
- DocSearchPageRequest,
- DocSearchPageRequestCitationStyle,
- DocSearchPageRequestEmbeddingModel,
- DocSearchPageRequestKeywordQuery,
- DocSearchPageRequestResponseFormatType,
- DocSearchPageRequestSelectedModel,
- DocSearchPageResponse,
DocSearchPageStatusResponse,
DocSummaryPageOutput,
- DocSummaryPageRequest,
- DocSummaryPageRequestResponseFormatType,
- DocSummaryPageRequestSelectedAsrModel,
- DocSummaryPageRequestSelectedModel,
- DocSummaryPageResponse,
DocSummaryPageStatusResponse,
+ DocSummaryRequestResponseFormatType,
+ DocSummaryRequestSelectedAsrModel,
+ DocSummaryRequestSelectedModel,
EmailFaceInpaintingPageOutput,
- EmailFaceInpaintingPageRequest,
- EmailFaceInpaintingPageRequestSelectedModel,
- EmailFaceInpaintingPageResponse,
EmailFaceInpaintingPageStatusResponse,
+ EmbedRequestSelectedModel,
EmbeddingsPageOutput,
- EmbeddingsPageRequest,
- EmbeddingsPageRequestSelectedModel,
- EmbeddingsPageResponse,
EmbeddingsPageStatusResponse,
EvalPrompt,
FaceInpaintingPageOutput,
- FaceInpaintingPageRequest,
- FaceInpaintingPageRequestSelectedModel,
- FaceInpaintingPageResponse,
FaceInpaintingPageStatusResponse,
FailedReponseModelV2,
FailedResponseDetail,
FinalResponse,
FunctionsPageOutput,
- FunctionsPageRequest,
- FunctionsPageResponse,
FunctionsPageStatusResponse,
GenericErrorResponse,
GenericErrorResponseDetail,
GoogleGptPageOutput,
- GoogleGptPageRequest,
- GoogleGptPageRequestEmbeddingModel,
- GoogleGptPageRequestResponseFormatType,
- GoogleGptPageRequestSelectedModel,
- GoogleGptPageResponse,
GoogleGptPageStatusResponse,
GoogleImageGenPageOutput,
- GoogleImageGenPageRequest,
- GoogleImageGenPageRequestSelectedModel,
- GoogleImageGenPageResponse,
GoogleImageGenPageStatusResponse,
HttpValidationError,
+ ImageFromEmailRequestSelectedModel,
+ ImageFromWebSearchRequestSelectedModel,
ImageSegmentationPageOutput,
- ImageSegmentationPageRequest,
- ImageSegmentationPageRequestSelectedModel,
- ImageSegmentationPageResponse,
ImageSegmentationPageStatusResponse,
ImageUrl,
ImageUrlDetail,
Img2ImgPageOutput,
- Img2ImgPageRequest,
- Img2ImgPageRequestSelectedControlnetModel,
- Img2ImgPageRequestSelectedControlnetModelItem,
- Img2ImgPageRequestSelectedModel,
- Img2ImgPageResponse,
Img2ImgPageStatusResponse,
LetterWriterPageOutput,
LetterWriterPageRequest,
- LetterWriterPageResponse,
LetterWriterPageStatusResponse,
LipsyncPageOutput,
- LipsyncPageRequest,
- LipsyncPageRequestSelectedModel,
- LipsyncPageResponse,
LipsyncPageStatusResponse,
LipsyncTtsPageOutput,
- LipsyncTtsPageRequest,
- LipsyncTtsPageRequestOpenaiTtsModel,
- LipsyncTtsPageRequestOpenaiVoiceName,
- LipsyncTtsPageRequestSelectedModel,
- LipsyncTtsPageRequestTtsProvider,
- LipsyncTtsPageResponse,
LipsyncTtsPageStatusResponse,
+ LipsyncTtsRequestOpenaiTtsModel,
+ LipsyncTtsRequestOpenaiVoiceName,
+ LipsyncTtsRequestSelectedModel,
+ LipsyncTtsRequestTtsProvider,
+ LlmRequestResponseFormatType,
+ LlmRequestSelectedModelsItem,
LlmTools,
MessagePart,
ObjectInpaintingPageOutput,
- ObjectInpaintingPageRequest,
- ObjectInpaintingPageRequestSelectedModel,
- ObjectInpaintingPageResponse,
ObjectInpaintingPageStatusResponse,
+ PersonalizeEmailRequestResponseFormatType,
+ PersonalizeEmailRequestSelectedModel,
+ PortraitRequestSelectedModel,
+ ProductImageRequestSelectedModel,
PromptTreeNode,
PromptTreeNodePrompt,
QrCodeGeneratorPageOutput,
- QrCodeGeneratorPageRequest,
- QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
- QrCodeGeneratorPageRequestScheduler,
- QrCodeGeneratorPageRequestSelectedControlnetModelItem,
- QrCodeGeneratorPageRequestSelectedModel,
- QrCodeGeneratorPageResponse,
QrCodeGeneratorPageStatusResponse,
+ QrCodeRequestImagePromptControlnetModelsItem,
+ QrCodeRequestScheduler,
+ QrCodeRequestSelectedControlnetModelItem,
+ QrCodeRequestSelectedModel,
+ RagRequestCitationStyle,
+ RagRequestEmbeddingModel,
+ RagRequestKeywordQuery,
+ RagRequestResponseFormatType,
+ RagRequestSelectedModel,
RecipeFunction,
RecipeFunctionTrigger,
RecipeRunState,
RelatedDocSearchResponse,
RelatedGoogleGptResponse,
RelatedQnADocPageOutput,
- RelatedQnADocPageRequest,
- RelatedQnADocPageRequestCitationStyle,
- RelatedQnADocPageRequestEmbeddingModel,
- RelatedQnADocPageRequestKeywordQuery,
- RelatedQnADocPageRequestResponseFormatType,
- RelatedQnADocPageRequestSelectedModel,
- RelatedQnADocPageResponse,
RelatedQnADocPageStatusResponse,
RelatedQnAPageOutput,
- RelatedQnAPageRequest,
- RelatedQnAPageRequestEmbeddingModel,
- RelatedQnAPageRequestResponseFormatType,
- RelatedQnAPageRequestSelectedModel,
- RelatedQnAPageResponse,
RelatedQnAPageStatusResponse,
+ RemixImageRequestSelectedControlnetModel,
+ RemixImageRequestSelectedControlnetModelItem,
+ RemixImageRequestSelectedModel,
+ RemoveBackgroundRequestSelectedModel,
ReplyButton,
ResponseModel,
ResponseModelFinalKeywordQuery,
@@ -243,63 +139,55 @@
SadTalkerSettings,
SadTalkerSettingsPreprocess,
SearchReference,
+ SeoContentRequestResponseFormatType,
+ SeoContentRequestSelectedModel,
+ SeoPeopleAlsoAskDocRequestCitationStyle,
+ SeoPeopleAlsoAskDocRequestEmbeddingModel,
+ SeoPeopleAlsoAskDocRequestKeywordQuery,
+ SeoPeopleAlsoAskDocRequestResponseFormatType,
+ SeoPeopleAlsoAskDocRequestSelectedModel,
+ SeoPeopleAlsoAskRequestEmbeddingModel,
+ SeoPeopleAlsoAskRequestResponseFormatType,
+ SeoPeopleAlsoAskRequestSelectedModel,
SeoSummaryPageOutput,
- SeoSummaryPageRequest,
- SeoSummaryPageRequestResponseFormatType,
- SeoSummaryPageRequestSelectedModel,
- SeoSummaryPageResponse,
SeoSummaryPageStatusResponse,
SerpSearchLocation,
SerpSearchType,
SmartGptPageOutput,
- SmartGptPageRequest,
- SmartGptPageRequestResponseFormatType,
- SmartGptPageRequestSelectedModel,
- SmartGptPageResponse,
SmartGptPageStatusResponse,
SocialLookupEmailPageOutput,
- SocialLookupEmailPageRequest,
- SocialLookupEmailPageRequestResponseFormatType,
- SocialLookupEmailPageRequestSelectedModel,
- SocialLookupEmailPageResponse,
SocialLookupEmailPageStatusResponse,
+ SpeechRecognitionRequestOutputFormat,
+ SpeechRecognitionRequestSelectedModel,
+ SpeechRecognitionRequestTranslationModel,
StreamError,
+ SynthesizeDataRequestResponseFormatType,
+ SynthesizeDataRequestSelectedAsrModel,
+ SynthesizeDataRequestSelectedModel,
Text2AudioPageOutput,
- Text2AudioPageRequest,
- Text2AudioPageResponse,
Text2AudioPageStatusResponse,
+ TextToImageRequestScheduler,
+ TextToImageRequestSelectedModelsItem,
TextToSpeechPageOutput,
- TextToSpeechPageRequest,
- TextToSpeechPageRequestOpenaiTtsModel,
- TextToSpeechPageRequestOpenaiVoiceName,
- TextToSpeechPageRequestTtsProvider,
- TextToSpeechPageResponse,
TextToSpeechPageStatusResponse,
+ TextToSpeechRequestOpenaiTtsModel,
+ TextToSpeechRequestOpenaiVoiceName,
+ TextToSpeechRequestTtsProvider,
TrainingDataModel,
+ TranslateRequestSelectedModel,
TranslationPageOutput,
- TranslationPageRequest,
- TranslationPageRequestSelectedModel,
- TranslationPageResponse,
TranslationPageStatusResponse,
+ UpscaleRequestSelectedModelsItem,
ValidationError,
ValidationErrorLocItem,
Vcard,
VideoBotsPageOutput,
VideoBotsPageOutputFinalKeywordQuery,
VideoBotsPageOutputFinalPrompt,
- VideoBotsPageRequest,
- VideoBotsPageRequestAsrModel,
- VideoBotsPageRequestCitationStyle,
- VideoBotsPageRequestEmbeddingModel,
- VideoBotsPageRequestLipsyncModel,
- VideoBotsPageRequestOpenaiTtsModel,
- VideoBotsPageRequestOpenaiVoiceName,
- VideoBotsPageRequestResponseFormatType,
- VideoBotsPageRequestSelectedModel,
- VideoBotsPageRequestTranslationModel,
- VideoBotsPageRequestTtsProvider,
- VideoBotsPageResponse,
VideoBotsPageStatusResponse,
+ WebSearchLlmRequestEmbeddingModel,
+ WebSearchLlmRequestResponseFormatType,
+ WebSearchLlmRequestSelectedModel,
)
from .errors import (
BadRequestError,
@@ -309,31 +197,72 @@
UnprocessableEntityError,
)
from . import (
+ ai_animation_generator,
+ ai_art_qr_code,
+ ai_background_changer,
+ ai_generated_photo_from_email_profile_lookup,
+ ai_image_with_a_face,
bulk_runner,
+ chyron_plant_bot,
+ compare_ai_image_generators,
+ compare_ai_image_upscalers,
+ compare_ai_translations,
+ compare_ai_voice_generators,
copilot_for_your_enterprise,
copilot_integrations,
+ create_a_perfect_seo_optimized_title_paragraph,
+ edit_an_image_with_ai_prompt,
embeddings,
evaluator,
functions,
+ generate_people_also_ask_seo_content,
+ generate_product_photo_backgrounds,
+ large_language_models_gpt3,
+ letter_writer,
lip_syncing,
+ lipsync_video_with_any_text,
misc,
+ people_also_ask_answers_from_a_doc,
+ profile_lookup_gpt3for_ai_personalized_emails,
+ render_image_search_results_with_ai,
+ search_your_docs_with_gpt,
smart_gpt,
+ speech_recognition_translation,
+ summarize_your_docs_with_gpt,
+ synthetic_data_maker_for_videos_pd_fs,
+ text_guided_audio_generator,
+ web_search_gpt3,
)
from .client import AsyncGooey, Gooey
+from .copilot_for_your_enterprise import (
+ AsyncFormVideoBotsRequestAsrModel,
+ AsyncFormVideoBotsRequestCitationStyle,
+ AsyncFormVideoBotsRequestEmbeddingModel,
+ AsyncFormVideoBotsRequestLipsyncModel,
+ AsyncFormVideoBotsRequestOpenaiTtsModel,
+ AsyncFormVideoBotsRequestOpenaiVoiceName,
+ AsyncFormVideoBotsRequestResponseFormatType,
+ AsyncFormVideoBotsRequestSelectedModel,
+ AsyncFormVideoBotsRequestTranslationModel,
+ AsyncFormVideoBotsRequestTtsProvider,
+)
from .copilot_integrations import (
- CreateStreamRequestAsrModel,
- CreateStreamRequestCitationStyle,
- CreateStreamRequestEmbeddingModel,
- CreateStreamRequestLipsyncModel,
- CreateStreamRequestOpenaiTtsModel,
- CreateStreamRequestOpenaiVoiceName,
- CreateStreamRequestResponseFormatType,
- CreateStreamRequestSelectedModel,
- CreateStreamRequestTranslationModel,
- CreateStreamRequestTtsProvider,
+ VideoBotsStreamCreateRequestAsrModel,
+ VideoBotsStreamCreateRequestCitationStyle,
+ VideoBotsStreamCreateRequestEmbeddingModel,
+ VideoBotsStreamCreateRequestLipsyncModel,
+ VideoBotsStreamCreateRequestOpenaiTtsModel,
+ VideoBotsStreamCreateRequestOpenaiVoiceName,
+ VideoBotsStreamCreateRequestResponseFormatType,
+ VideoBotsStreamCreateRequestSelectedModel,
+ VideoBotsStreamCreateRequestTranslationModel,
+ VideoBotsStreamCreateRequestTtsProvider,
VideoBotsStreamResponse,
)
from .environment import GooeyEnvironment
+from .evaluator import AsyncFormBulkEvalRequestResponseFormatType, AsyncFormBulkEvalRequestSelectedModel
+from .lip_syncing import AsyncFormLipsyncRequestSelectedModel
+from .smart_gpt import AsyncFormSmartGptRequestResponseFormatType, AsyncFormSmartGptRequestSelectedModel
from .version import __version__
__all__ = [
@@ -341,64 +270,36 @@
"AggFunctionFunction",
"AggFunctionResult",
"AggFunctionResultFunction",
+ "AnimateRequestSelectedModel",
"AnimationPrompt",
"AsrChunk",
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
- "AsrPageRequest",
- "AsrPageRequestOutputFormat",
- "AsrPageRequestSelectedModel",
- "AsrPageRequestTranslationModel",
- "AsrPageResponse",
"AsrPageStatusResponse",
"AsyncApiResponseModelV3",
+ "AsyncFormBulkEvalRequestResponseFormatType",
+ "AsyncFormBulkEvalRequestSelectedModel",
+ "AsyncFormLipsyncRequestSelectedModel",
+ "AsyncFormSmartGptRequestResponseFormatType",
+ "AsyncFormSmartGptRequestSelectedModel",
+ "AsyncFormVideoBotsRequestAsrModel",
+ "AsyncFormVideoBotsRequestCitationStyle",
+ "AsyncFormVideoBotsRequestEmbeddingModel",
+ "AsyncFormVideoBotsRequestLipsyncModel",
+ "AsyncFormVideoBotsRequestOpenaiTtsModel",
+ "AsyncFormVideoBotsRequestOpenaiVoiceName",
+ "AsyncFormVideoBotsRequestResponseFormatType",
+ "AsyncFormVideoBotsRequestSelectedModel",
+ "AsyncFormVideoBotsRequestTranslationModel",
+ "AsyncFormVideoBotsRequestTtsProvider",
"AsyncGooey",
"BadRequestError",
"BalanceResponse",
- "BodyAsyncFormArtQrCode",
- "BodyAsyncFormAsr",
- "BodyAsyncFormBulkEval",
- "BodyAsyncFormBulkRunner",
- "BodyAsyncFormChyronPlant",
- "BodyAsyncFormCompareAiUpscalers",
- "BodyAsyncFormCompareLlm",
- "BodyAsyncFormCompareText2Img",
- "BodyAsyncFormDeforumSd",
- "BodyAsyncFormDocExtract",
- "BodyAsyncFormDocSearch",
- "BodyAsyncFormDocSummary",
- "BodyAsyncFormEmailFaceInpainting",
- "BodyAsyncFormEmbeddings",
- "BodyAsyncFormFaceInpainting",
- "BodyAsyncFormFunctions",
- "BodyAsyncFormGoogleGpt",
- "BodyAsyncFormGoogleImageGen",
- "BodyAsyncFormImageSegmentation",
- "BodyAsyncFormImg2Img",
- "BodyAsyncFormLetterWriter",
- "BodyAsyncFormLipsync",
- "BodyAsyncFormLipsyncTts",
- "BodyAsyncFormObjectInpainting",
- "BodyAsyncFormRelatedQnaMaker",
- "BodyAsyncFormRelatedQnaMakerDoc",
- "BodyAsyncFormSeoSummary",
- "BodyAsyncFormSmartGpt",
- "BodyAsyncFormSocialLookupEmail",
- "BodyAsyncFormText2Audio",
- "BodyAsyncFormTextToSpeech",
- "BodyAsyncFormTranslate",
- "BodyAsyncFormVideoBots",
"BotBroadcastFilters",
"BulkEvalPageOutput",
- "BulkEvalPageRequest",
- "BulkEvalPageRequestResponseFormatType",
- "BulkEvalPageRequestSelectedModel",
- "BulkEvalPageResponse",
"BulkEvalPageStatusResponse",
"BulkRunnerPageOutput",
- "BulkRunnerPageRequest",
- "BulkRunnerPageResponse",
"BulkRunnerPageStatusResponse",
"ButtonPressed",
"CalledFunctionResponse",
@@ -407,24 +308,12 @@
"ChatCompletionContentPartTextParam",
"ChyronPlantPageOutput",
"ChyronPlantPageRequest",
- "ChyronPlantPageResponse",
"ChyronPlantPageStatusResponse",
"CompareLlmPageOutput",
- "CompareLlmPageRequest",
- "CompareLlmPageRequestResponseFormatType",
- "CompareLlmPageRequestSelectedModelsItem",
- "CompareLlmPageResponse",
"CompareLlmPageStatusResponse",
"CompareText2ImgPageOutput",
- "CompareText2ImgPageRequest",
- "CompareText2ImgPageRequestScheduler",
- "CompareText2ImgPageRequestSelectedModelsItem",
- "CompareText2ImgPageResponse",
"CompareText2ImgPageStatusResponse",
"CompareUpscalerPageOutput",
- "CompareUpscalerPageRequest",
- "CompareUpscalerPageRequestSelectedModelsItem",
- "CompareUpscalerPageResponse",
"CompareUpscalerPageStatusResponse",
"ConsoleLogs",
"ConsoleLogsLevel",
@@ -435,156 +324,97 @@
"ConversationEntryContentItem_Text",
"ConversationEntryRole",
"ConversationStart",
- "CreateStreamRequestAsrModel",
- "CreateStreamRequestCitationStyle",
- "CreateStreamRequestEmbeddingModel",
- "CreateStreamRequestLipsyncModel",
- "CreateStreamRequestOpenaiTtsModel",
- "CreateStreamRequestOpenaiVoiceName",
- "CreateStreamRequestResponseFormatType",
- "CreateStreamRequestSelectedModel",
- "CreateStreamRequestTranslationModel",
- "CreateStreamRequestTtsProvider",
"CreateStreamResponse",
"DeforumSdPageOutput",
- "DeforumSdPageRequest",
- "DeforumSdPageRequestSelectedModel",
- "DeforumSdPageResponse",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
- "DocExtractPageRequest",
- "DocExtractPageRequestResponseFormatType",
- "DocExtractPageRequestSelectedAsrModel",
- "DocExtractPageRequestSelectedModel",
- "DocExtractPageResponse",
"DocExtractPageStatusResponse",
"DocSearchPageOutput",
- "DocSearchPageRequest",
- "DocSearchPageRequestCitationStyle",
- "DocSearchPageRequestEmbeddingModel",
- "DocSearchPageRequestKeywordQuery",
- "DocSearchPageRequestResponseFormatType",
- "DocSearchPageRequestSelectedModel",
- "DocSearchPageResponse",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
- "DocSummaryPageRequest",
- "DocSummaryPageRequestResponseFormatType",
- "DocSummaryPageRequestSelectedAsrModel",
- "DocSummaryPageRequestSelectedModel",
- "DocSummaryPageResponse",
"DocSummaryPageStatusResponse",
+ "DocSummaryRequestResponseFormatType",
+ "DocSummaryRequestSelectedAsrModel",
+ "DocSummaryRequestSelectedModel",
"EmailFaceInpaintingPageOutput",
- "EmailFaceInpaintingPageRequest",
- "EmailFaceInpaintingPageRequestSelectedModel",
- "EmailFaceInpaintingPageResponse",
"EmailFaceInpaintingPageStatusResponse",
+ "EmbedRequestSelectedModel",
"EmbeddingsPageOutput",
- "EmbeddingsPageRequest",
- "EmbeddingsPageRequestSelectedModel",
- "EmbeddingsPageResponse",
"EmbeddingsPageStatusResponse",
"EvalPrompt",
"FaceInpaintingPageOutput",
- "FaceInpaintingPageRequest",
- "FaceInpaintingPageRequestSelectedModel",
- "FaceInpaintingPageResponse",
"FaceInpaintingPageStatusResponse",
"FailedReponseModelV2",
"FailedResponseDetail",
"FinalResponse",
"FunctionsPageOutput",
- "FunctionsPageRequest",
- "FunctionsPageResponse",
"FunctionsPageStatusResponse",
"GenericErrorResponse",
"GenericErrorResponseDetail",
"Gooey",
"GooeyEnvironment",
"GoogleGptPageOutput",
- "GoogleGptPageRequest",
- "GoogleGptPageRequestEmbeddingModel",
- "GoogleGptPageRequestResponseFormatType",
- "GoogleGptPageRequestSelectedModel",
- "GoogleGptPageResponse",
"GoogleGptPageStatusResponse",
"GoogleImageGenPageOutput",
- "GoogleImageGenPageRequest",
- "GoogleImageGenPageRequestSelectedModel",
- "GoogleImageGenPageResponse",
"GoogleImageGenPageStatusResponse",
"HttpValidationError",
+ "ImageFromEmailRequestSelectedModel",
+ "ImageFromWebSearchRequestSelectedModel",
"ImageSegmentationPageOutput",
- "ImageSegmentationPageRequest",
- "ImageSegmentationPageRequestSelectedModel",
- "ImageSegmentationPageResponse",
"ImageSegmentationPageStatusResponse",
"ImageUrl",
"ImageUrlDetail",
"Img2ImgPageOutput",
- "Img2ImgPageRequest",
- "Img2ImgPageRequestSelectedControlnetModel",
- "Img2ImgPageRequestSelectedControlnetModelItem",
- "Img2ImgPageRequestSelectedModel",
- "Img2ImgPageResponse",
"Img2ImgPageStatusResponse",
"InternalServerError",
"LetterWriterPageOutput",
"LetterWriterPageRequest",
- "LetterWriterPageResponse",
"LetterWriterPageStatusResponse",
"LipsyncPageOutput",
- "LipsyncPageRequest",
- "LipsyncPageRequestSelectedModel",
- "LipsyncPageResponse",
"LipsyncPageStatusResponse",
"LipsyncTtsPageOutput",
- "LipsyncTtsPageRequest",
- "LipsyncTtsPageRequestOpenaiTtsModel",
- "LipsyncTtsPageRequestOpenaiVoiceName",
- "LipsyncTtsPageRequestSelectedModel",
- "LipsyncTtsPageRequestTtsProvider",
- "LipsyncTtsPageResponse",
"LipsyncTtsPageStatusResponse",
+ "LipsyncTtsRequestOpenaiTtsModel",
+ "LipsyncTtsRequestOpenaiVoiceName",
+ "LipsyncTtsRequestSelectedModel",
+ "LipsyncTtsRequestTtsProvider",
+ "LlmRequestResponseFormatType",
+ "LlmRequestSelectedModelsItem",
"LlmTools",
"MessagePart",
"ObjectInpaintingPageOutput",
- "ObjectInpaintingPageRequest",
- "ObjectInpaintingPageRequestSelectedModel",
- "ObjectInpaintingPageResponse",
"ObjectInpaintingPageStatusResponse",
"PaymentRequiredError",
+ "PersonalizeEmailRequestResponseFormatType",
+ "PersonalizeEmailRequestSelectedModel",
+ "PortraitRequestSelectedModel",
+ "ProductImageRequestSelectedModel",
"PromptTreeNode",
"PromptTreeNodePrompt",
"QrCodeGeneratorPageOutput",
- "QrCodeGeneratorPageRequest",
- "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
- "QrCodeGeneratorPageRequestScheduler",
- "QrCodeGeneratorPageRequestSelectedControlnetModelItem",
- "QrCodeGeneratorPageRequestSelectedModel",
- "QrCodeGeneratorPageResponse",
"QrCodeGeneratorPageStatusResponse",
+ "QrCodeRequestImagePromptControlnetModelsItem",
+ "QrCodeRequestScheduler",
+ "QrCodeRequestSelectedControlnetModelItem",
+ "QrCodeRequestSelectedModel",
+ "RagRequestCitationStyle",
+ "RagRequestEmbeddingModel",
+ "RagRequestKeywordQuery",
+ "RagRequestResponseFormatType",
+ "RagRequestSelectedModel",
"RecipeFunction",
"RecipeFunctionTrigger",
"RecipeRunState",
"RelatedDocSearchResponse",
"RelatedGoogleGptResponse",
"RelatedQnADocPageOutput",
- "RelatedQnADocPageRequest",
- "RelatedQnADocPageRequestCitationStyle",
- "RelatedQnADocPageRequestEmbeddingModel",
- "RelatedQnADocPageRequestKeywordQuery",
- "RelatedQnADocPageRequestResponseFormatType",
- "RelatedQnADocPageRequestSelectedModel",
- "RelatedQnADocPageResponse",
"RelatedQnADocPageStatusResponse",
"RelatedQnAPageOutput",
- "RelatedQnAPageRequest",
- "RelatedQnAPageRequestEmbeddingModel",
- "RelatedQnAPageRequestResponseFormatType",
- "RelatedQnAPageRequestSelectedModel",
- "RelatedQnAPageResponse",
"RelatedQnAPageStatusResponse",
+ "RemixImageRequestSelectedControlnetModel",
+ "RemixImageRequestSelectedControlnetModelItem",
+ "RemixImageRequestSelectedModel",
+ "RemoveBackgroundRequestSelectedModel",
"ReplyButton",
"ResponseModel",
"ResponseModelFinalKeywordQuery",
@@ -595,74 +425,102 @@
"SadTalkerSettings",
"SadTalkerSettingsPreprocess",
"SearchReference",
+ "SeoContentRequestResponseFormatType",
+ "SeoContentRequestSelectedModel",
+ "SeoPeopleAlsoAskDocRequestCitationStyle",
+ "SeoPeopleAlsoAskDocRequestEmbeddingModel",
+ "SeoPeopleAlsoAskDocRequestKeywordQuery",
+ "SeoPeopleAlsoAskDocRequestResponseFormatType",
+ "SeoPeopleAlsoAskDocRequestSelectedModel",
+ "SeoPeopleAlsoAskRequestEmbeddingModel",
+ "SeoPeopleAlsoAskRequestResponseFormatType",
+ "SeoPeopleAlsoAskRequestSelectedModel",
"SeoSummaryPageOutput",
- "SeoSummaryPageRequest",
- "SeoSummaryPageRequestResponseFormatType",
- "SeoSummaryPageRequestSelectedModel",
- "SeoSummaryPageResponse",
"SeoSummaryPageStatusResponse",
"SerpSearchLocation",
"SerpSearchType",
"SmartGptPageOutput",
- "SmartGptPageRequest",
- "SmartGptPageRequestResponseFormatType",
- "SmartGptPageRequestSelectedModel",
- "SmartGptPageResponse",
"SmartGptPageStatusResponse",
"SocialLookupEmailPageOutput",
- "SocialLookupEmailPageRequest",
- "SocialLookupEmailPageRequestResponseFormatType",
- "SocialLookupEmailPageRequestSelectedModel",
- "SocialLookupEmailPageResponse",
"SocialLookupEmailPageStatusResponse",
+ "SpeechRecognitionRequestOutputFormat",
+ "SpeechRecognitionRequestSelectedModel",
+ "SpeechRecognitionRequestTranslationModel",
"StreamError",
+ "SynthesizeDataRequestResponseFormatType",
+ "SynthesizeDataRequestSelectedAsrModel",
+ "SynthesizeDataRequestSelectedModel",
"Text2AudioPageOutput",
- "Text2AudioPageRequest",
- "Text2AudioPageResponse",
"Text2AudioPageStatusResponse",
+ "TextToImageRequestScheduler",
+ "TextToImageRequestSelectedModelsItem",
"TextToSpeechPageOutput",
- "TextToSpeechPageRequest",
- "TextToSpeechPageRequestOpenaiTtsModel",
- "TextToSpeechPageRequestOpenaiVoiceName",
- "TextToSpeechPageRequestTtsProvider",
- "TextToSpeechPageResponse",
"TextToSpeechPageStatusResponse",
+ "TextToSpeechRequestOpenaiTtsModel",
+ "TextToSpeechRequestOpenaiVoiceName",
+ "TextToSpeechRequestTtsProvider",
"TooManyRequestsError",
"TrainingDataModel",
+ "TranslateRequestSelectedModel",
"TranslationPageOutput",
- "TranslationPageRequest",
- "TranslationPageRequestSelectedModel",
- "TranslationPageResponse",
"TranslationPageStatusResponse",
"UnprocessableEntityError",
+ "UpscaleRequestSelectedModelsItem",
"ValidationError",
"ValidationErrorLocItem",
"Vcard",
"VideoBotsPageOutput",
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
- "VideoBotsPageRequest",
- "VideoBotsPageRequestAsrModel",
- "VideoBotsPageRequestCitationStyle",
- "VideoBotsPageRequestEmbeddingModel",
- "VideoBotsPageRequestLipsyncModel",
- "VideoBotsPageRequestOpenaiTtsModel",
- "VideoBotsPageRequestOpenaiVoiceName",
- "VideoBotsPageRequestResponseFormatType",
- "VideoBotsPageRequestSelectedModel",
- "VideoBotsPageRequestTranslationModel",
- "VideoBotsPageRequestTtsProvider",
- "VideoBotsPageResponse",
"VideoBotsPageStatusResponse",
+ "VideoBotsStreamCreateRequestAsrModel",
+ "VideoBotsStreamCreateRequestCitationStyle",
+ "VideoBotsStreamCreateRequestEmbeddingModel",
+ "VideoBotsStreamCreateRequestLipsyncModel",
+ "VideoBotsStreamCreateRequestOpenaiTtsModel",
+ "VideoBotsStreamCreateRequestOpenaiVoiceName",
+ "VideoBotsStreamCreateRequestResponseFormatType",
+ "VideoBotsStreamCreateRequestSelectedModel",
+ "VideoBotsStreamCreateRequestTranslationModel",
+ "VideoBotsStreamCreateRequestTtsProvider",
"VideoBotsStreamResponse",
+ "WebSearchLlmRequestEmbeddingModel",
+ "WebSearchLlmRequestResponseFormatType",
+ "WebSearchLlmRequestSelectedModel",
"__version__",
+ "ai_animation_generator",
+ "ai_art_qr_code",
+ "ai_background_changer",
+ "ai_generated_photo_from_email_profile_lookup",
+ "ai_image_with_a_face",
"bulk_runner",
+ "chyron_plant_bot",
+ "compare_ai_image_generators",
+ "compare_ai_image_upscalers",
+ "compare_ai_translations",
+ "compare_ai_voice_generators",
"copilot_for_your_enterprise",
"copilot_integrations",
+ "create_a_perfect_seo_optimized_title_paragraph",
+ "edit_an_image_with_ai_prompt",
"embeddings",
"evaluator",
"functions",
+ "generate_people_also_ask_seo_content",
+ "generate_product_photo_backgrounds",
+ "large_language_models_gpt3",
+ "letter_writer",
"lip_syncing",
+ "lipsync_video_with_any_text",
"misc",
+ "people_also_ask_answers_from_a_doc",
+ "profile_lookup_gpt3for_ai_personalized_emails",
+ "render_image_search_results_with_ai",
+ "search_your_docs_with_gpt",
"smart_gpt",
+ "speech_recognition_translation",
+ "summarize_your_docs_with_gpt",
+ "synthetic_data_maker_for_videos_pd_fs",
+ "text_guided_audio_generator",
+ "web_search_gpt3",
]
diff --git a/src/gooey/ai_animation_generator/__init__.py b/src/gooey/ai_animation_generator/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/ai_animation_generator/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/ai_animation_generator/client.py b/src/gooey/ai_animation_generator/client.py
new file mode 100644
index 0000000..b510152
--- /dev/null
+++ b/src/gooey/ai_animation_generator/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.deforum_sd_page_status_response import DeforumSdPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class AiAnimationGeneratorClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_deforum_sd(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> DeforumSdPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DeforumSdPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.ai_animation_generator.status_deforum_sd(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/DeforumSD/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DeforumSdPageStatusResponse, parse_obj_as(type_=DeforumSdPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAiAnimationGeneratorClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_deforum_sd(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> DeforumSdPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DeforumSdPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.ai_animation_generator.status_deforum_sd(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/DeforumSD/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DeforumSdPageStatusResponse, parse_obj_as(type_=DeforumSdPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/ai_art_qr_code/__init__.py b/src/gooey/ai_art_qr_code/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/ai_art_qr_code/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/ai_art_qr_code/client.py b/src/gooey/ai_art_qr_code/client.py
new file mode 100644
index 0000000..ca94e4e
--- /dev/null
+++ b/src/gooey/ai_art_qr_code/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse
+
+
+class AiArtQrCodeClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_art_qr_code(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> QrCodeGeneratorPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ QrCodeGeneratorPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.ai_art_qr_code.status_art_qr_code(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/art-qr-code/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(QrCodeGeneratorPageStatusResponse, parse_obj_as(type_=QrCodeGeneratorPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAiArtQrCodeClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_art_qr_code(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> QrCodeGeneratorPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ QrCodeGeneratorPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.ai_art_qr_code.status_art_qr_code(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/art-qr-code/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(QrCodeGeneratorPageStatusResponse, parse_obj_as(type_=QrCodeGeneratorPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/ai_background_changer/__init__.py b/src/gooey/ai_background_changer/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/ai_background_changer/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/ai_background_changer/client.py b/src/gooey/ai_background_changer/client.py
new file mode 100644
index 0000000..0c430f5
--- /dev/null
+++ b/src/gooey/ai_background_changer/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.image_segmentation_page_status_response import ImageSegmentationPageStatusResponse
+
+
+class AiBackgroundChangerClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_image_segmentation(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ImageSegmentationPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ImageSegmentationPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.ai_background_changer.status_image_segmentation(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/ImageSegmentation/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(ImageSegmentationPageStatusResponse, parse_obj_as(type_=ImageSegmentationPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAiBackgroundChangerClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_image_segmentation(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ImageSegmentationPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ImageSegmentationPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.ai_background_changer.status_image_segmentation(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/ImageSegmentation/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(ImageSegmentationPageStatusResponse, parse_obj_as(type_=ImageSegmentationPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py b/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py b/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py
new file mode 100644
index 0000000..1b29a5a
--- /dev/null
+++ b/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class AiGeneratedPhotoFromEmailProfileLookupClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_email_face_inpainting(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> EmailFaceInpaintingPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ EmailFaceInpaintingPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/EmailFaceInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(EmailFaceInpaintingPageStatusResponse, parse_obj_as(type_=EmailFaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAiGeneratedPhotoFromEmailProfileLookupClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_email_face_inpainting(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> EmailFaceInpaintingPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ EmailFaceInpaintingPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/EmailFaceInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(EmailFaceInpaintingPageStatusResponse, parse_obj_as(type_=EmailFaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/ai_image_with_a_face/__init__.py b/src/gooey/ai_image_with_a_face/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/ai_image_with_a_face/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/ai_image_with_a_face/client.py b/src/gooey/ai_image_with_a_face/client.py
new file mode 100644
index 0000000..9866b9a
--- /dev/null
+++ b/src/gooey/ai_image_with_a_face/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.face_inpainting_page_status_response import FaceInpaintingPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class AiImageWithAFaceClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_face_inpainting(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> FaceInpaintingPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FaceInpaintingPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.ai_image_with_a_face.status_face_inpainting(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/FaceInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(FaceInpaintingPageStatusResponse, parse_obj_as(type_=FaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAiImageWithAFaceClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_face_inpainting(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> FaceInpaintingPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FaceInpaintingPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.ai_image_with_a_face.status_face_inpainting(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/FaceInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(FaceInpaintingPageStatusResponse, parse_obj_as(type_=FaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/bulk_runner/client.py b/src/gooey/bulk_runner/client.py
index 0c7faa8..a1d42ae 100644
--- a/src/gooey/bulk_runner/client.py
+++ b/src/gooey/bulk_runner/client.py
@@ -5,23 +5,35 @@
from ..core.api_error import ApiError
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.bulk_runner_page_status_response import BulkRunnerPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
class BulkRunnerClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
- def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ def status_bulk_runner(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkRunnerPageStatusResponse:
"""
Parameters
----------
+ run_id : str
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- None
+ BulkRunnerPageStatusResponse
+ Successful Response
Examples
--------
@@ -30,14 +42,28 @@ def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> No
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.bulk_runner.post()
+ client.bulk_runner.status_bulk_runner(
+ run_id="run_id",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
- "v2/bulk-runner/", method="POST", request_options=request_options
+ "v3/bulk-runner/status", method="GET", params={"run_id": run_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return
+ return typing.cast(BulkRunnerPageStatusResponse, parse_obj_as(type_=BulkRunnerPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -48,16 +74,21 @@ class AsyncBulkRunnerClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
- async def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ async def status_bulk_runner(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkRunnerPageStatusResponse:
"""
Parameters
----------
+ run_id : str
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- None
+ BulkRunnerPageStatusResponse
+ Successful Response
Examples
--------
@@ -71,17 +102,31 @@ async def post(self, *, request_options: typing.Optional[RequestOptions] = None)
async def main() -> None:
- await client.bulk_runner.post()
+ await client.bulk_runner.status_bulk_runner(
+ run_id="run_id",
+ )
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v2/bulk-runner/", method="POST", request_options=request_options
+ "v3/bulk-runner/status", method="GET", params={"run_id": run_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return
+ return typing.cast(BulkRunnerPageStatusResponse, parse_obj_as(type_=BulkRunnerPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/chyron_plant_bot/__init__.py b/src/gooey/chyron_plant_bot/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/chyron_plant_bot/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/chyron_plant_bot/client.py b/src/gooey/chyron_plant_bot/client.py
new file mode 100644
index 0000000..4ba9907
--- /dev/null
+++ b/src/gooey/chyron_plant_bot/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.chyron_plant_page_status_response import ChyronPlantPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class ChyronPlantBotClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_chyron_plant(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ChyronPlantPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ChyronPlantPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.chyron_plant_bot.status_chyron_plant(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/ChyronPlant/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(ChyronPlantPageStatusResponse, parse_obj_as(type_=ChyronPlantPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncChyronPlantBotClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_chyron_plant(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ChyronPlantPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ChyronPlantPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.chyron_plant_bot.status_chyron_plant(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/ChyronPlant/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(ChyronPlantPageStatusResponse, parse_obj_as(type_=ChyronPlantPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/client.py b/src/gooey/client.py
index fd604e2..d0bf440 100644
--- a/src/gooey/client.py
+++ b/src/gooey/client.py
@@ -6,13 +6,31 @@
import httpx
+from .ai_animation_generator.client import AiAnimationGeneratorClient, AsyncAiAnimationGeneratorClient
+from .ai_art_qr_code.client import AiArtQrCodeClient, AsyncAiArtQrCodeClient
+from .ai_background_changer.client import AiBackgroundChangerClient, AsyncAiBackgroundChangerClient
+from .ai_generated_photo_from_email_profile_lookup.client import (
+ AiGeneratedPhotoFromEmailProfileLookupClient,
+ AsyncAiGeneratedPhotoFromEmailProfileLookupClient,
+)
+from .ai_image_with_a_face.client import AiImageWithAFaceClient, AsyncAiImageWithAFaceClient
from .bulk_runner.client import AsyncBulkRunnerClient, BulkRunnerClient
+from .chyron_plant_bot.client import AsyncChyronPlantBotClient, ChyronPlantBotClient
+from .compare_ai_image_generators.client import AsyncCompareAiImageGeneratorsClient, CompareAiImageGeneratorsClient
+from .compare_ai_image_upscalers.client import AsyncCompareAiImageUpscalersClient, CompareAiImageUpscalersClient
+from .compare_ai_translations.client import AsyncCompareAiTranslationsClient, CompareAiTranslationsClient
+from .compare_ai_voice_generators.client import AsyncCompareAiVoiceGeneratorsClient, CompareAiVoiceGeneratorsClient
from .copilot_for_your_enterprise.client import AsyncCopilotForYourEnterpriseClient, CopilotForYourEnterpriseClient
from .copilot_integrations.client import AsyncCopilotIntegrationsClient, CopilotIntegrationsClient
from .core.api_error import ApiError
from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from .core.pydantic_utilities import parse_obj_as
from .core.request_options import RequestOptions
+from .create_a_perfect_seo_optimized_title_paragraph.client import (
+ AsyncCreateAPerfectSeoOptimizedTitleParagraphClient,
+ CreateAPerfectSeoOptimizedTitleParagraphClient,
+)
+from .edit_an_image_with_ai_prompt.client import AsyncEditAnImageWithAiPromptClient, EditAnImageWithAiPromptClient
from .embeddings.client import AsyncEmbeddingsClient, EmbeddingsClient
from .environment import GooeyEnvironment
from .errors.bad_request_error import BadRequestError
@@ -22,71 +40,138 @@
from .errors.unprocessable_entity_error import UnprocessableEntityError
from .evaluator.client import AsyncEvaluatorClient, EvaluatorClient
from .functions.client import AsyncFunctionsClient, FunctionsClient
+from .generate_people_also_ask_seo_content.client import (
+ AsyncGeneratePeopleAlsoAskSeoContentClient,
+ GeneratePeopleAlsoAskSeoContentClient,
+)
+from .generate_product_photo_backgrounds.client import (
+ AsyncGenerateProductPhotoBackgroundsClient,
+ GenerateProductPhotoBackgroundsClient,
+)
+from .large_language_models_gpt3.client import AsyncLargeLanguageModelsGpt3Client, LargeLanguageModelsGpt3Client
+from .letter_writer.client import AsyncLetterWriterClient, LetterWriterClient
from .lip_syncing.client import AsyncLipSyncingClient, LipSyncingClient
+from .lipsync_video_with_any_text.client import AsyncLipsyncVideoWithAnyTextClient, LipsyncVideoWithAnyTextClient
from .misc.client import AsyncMiscClient, MiscClient
+from .people_also_ask_answers_from_a_doc.client import (
+ AsyncPeopleAlsoAskAnswersFromADocClient,
+ PeopleAlsoAskAnswersFromADocClient,
+)
+from .profile_lookup_gpt3for_ai_personalized_emails.client import (
+ AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient,
+ ProfileLookupGpt3ForAiPersonalizedEmailsClient,
+)
+from .render_image_search_results_with_ai.client import (
+ AsyncRenderImageSearchResultsWithAiClient,
+ RenderImageSearchResultsWithAiClient,
+)
+from .search_your_docs_with_gpt.client import AsyncSearchYourDocsWithGptClient, SearchYourDocsWithGptClient
from .smart_gpt.client import AsyncSmartGptClient, SmartGptClient
-from .types.asr_page_response import AsrPageResponse
-from .types.body_async_form_art_qr_code import BodyAsyncFormArtQrCode
-from .types.body_async_form_asr import BodyAsyncFormAsr
-from .types.body_async_form_bulk_runner import BodyAsyncFormBulkRunner
-from .types.body_async_form_compare_ai_upscalers import BodyAsyncFormCompareAiUpscalers
-from .types.body_async_form_compare_llm import BodyAsyncFormCompareLlm
-from .types.body_async_form_compare_text2img import BodyAsyncFormCompareText2Img
-from .types.body_async_form_deforum_sd import BodyAsyncFormDeforumSd
-from .types.body_async_form_doc_extract import BodyAsyncFormDocExtract
-from .types.body_async_form_doc_search import BodyAsyncFormDocSearch
-from .types.body_async_form_doc_summary import BodyAsyncFormDocSummary
-from .types.body_async_form_email_face_inpainting import BodyAsyncFormEmailFaceInpainting
-from .types.body_async_form_embeddings import BodyAsyncFormEmbeddings
-from .types.body_async_form_face_inpainting import BodyAsyncFormFaceInpainting
-from .types.body_async_form_google_gpt import BodyAsyncFormGoogleGpt
-from .types.body_async_form_google_image_gen import BodyAsyncFormGoogleImageGen
-from .types.body_async_form_image_segmentation import BodyAsyncFormImageSegmentation
-from .types.body_async_form_img2img import BodyAsyncFormImg2Img
-from .types.body_async_form_lipsync_tts import BodyAsyncFormLipsyncTts
-from .types.body_async_form_object_inpainting import BodyAsyncFormObjectInpainting
-from .types.body_async_form_related_qna_maker import BodyAsyncFormRelatedQnaMaker
-from .types.body_async_form_related_qna_maker_doc import BodyAsyncFormRelatedQnaMakerDoc
-from .types.body_async_form_seo_summary import BodyAsyncFormSeoSummary
-from .types.body_async_form_social_lookup_email import BodyAsyncFormSocialLookupEmail
-from .types.body_async_form_text2audio import BodyAsyncFormText2Audio
-from .types.body_async_form_text_to_speech import BodyAsyncFormTextToSpeech
-from .types.body_async_form_translate import BodyAsyncFormTranslate
-from .types.bulk_eval_page_response import BulkEvalPageResponse
-from .types.bulk_runner_page_response import BulkRunnerPageResponse
-from .types.chyron_plant_page_response import ChyronPlantPageResponse
-from .types.compare_llm_page_response import CompareLlmPageResponse
-from .types.compare_text2img_page_response import CompareText2ImgPageResponse
-from .types.compare_upscaler_page_response import CompareUpscalerPageResponse
-from .types.deforum_sd_page_response import DeforumSdPageResponse
-from .types.doc_extract_page_response import DocExtractPageResponse
-from .types.doc_search_page_response import DocSearchPageResponse
-from .types.doc_summary_page_response import DocSummaryPageResponse
-from .types.email_face_inpainting_page_response import EmailFaceInpaintingPageResponse
-from .types.embeddings_page_response import EmbeddingsPageResponse
-from .types.face_inpainting_page_response import FaceInpaintingPageResponse
+from .speech_recognition_translation.client import (
+ AsyncSpeechRecognitionTranslationClient,
+ SpeechRecognitionTranslationClient,
+)
+from .summarize_your_docs_with_gpt.client import AsyncSummarizeYourDocsWithGptClient, SummarizeYourDocsWithGptClient
+from .synthetic_data_maker_for_videos_pd_fs.client import (
+ AsyncSyntheticDataMakerForVideosPdFsClient,
+ SyntheticDataMakerForVideosPdFsClient,
+)
+from .text_guided_audio_generator.client import AsyncTextGuidedAudioGeneratorClient, TextGuidedAudioGeneratorClient
+from .types.animate_request_selected_model import AnimateRequestSelectedModel
+from .types.animation_prompt import AnimationPrompt
+from .types.asr_page_status_response import AsrPageStatusResponse
+from .types.bulk_runner_page_status_response import BulkRunnerPageStatusResponse
+from .types.compare_llm_page_status_response import CompareLlmPageStatusResponse
+from .types.compare_text2img_page_status_response import CompareText2ImgPageStatusResponse
+from .types.compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse
+from .types.deforum_sd_page_status_response import DeforumSdPageStatusResponse
+from .types.doc_extract_page_status_response import DocExtractPageStatusResponse
+from .types.doc_search_page_status_response import DocSearchPageStatusResponse
+from .types.doc_summary_page_status_response import DocSummaryPageStatusResponse
+from .types.doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType
+from .types.doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel
+from .types.doc_summary_request_selected_model import DocSummaryRequestSelectedModel
+from .types.email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse
+from .types.embed_request_selected_model import EmbedRequestSelectedModel
+from .types.embeddings_page_status_response import EmbeddingsPageStatusResponse
+from .types.face_inpainting_page_status_response import FaceInpaintingPageStatusResponse
from .types.failed_reponse_model_v2 import FailedReponseModelV2
-from .types.functions_page_response import FunctionsPageResponse
from .types.generic_error_response import GenericErrorResponse
-from .types.google_gpt_page_response import GoogleGptPageResponse
-from .types.google_image_gen_page_response import GoogleImageGenPageResponse
+from .types.google_gpt_page_status_response import GoogleGptPageStatusResponse
+from .types.google_image_gen_page_status_response import GoogleImageGenPageStatusResponse
from .types.http_validation_error import HttpValidationError
-from .types.image_segmentation_page_response import ImageSegmentationPageResponse
-from .types.img2img_page_response import Img2ImgPageResponse
-from .types.letter_writer_page_response import LetterWriterPageResponse
-from .types.lipsync_page_response import LipsyncPageResponse
-from .types.lipsync_tts_page_response import LipsyncTtsPageResponse
-from .types.object_inpainting_page_response import ObjectInpaintingPageResponse
-from .types.qr_code_generator_page_response import QrCodeGeneratorPageResponse
-from .types.related_qn_a_doc_page_response import RelatedQnADocPageResponse
-from .types.related_qn_a_page_response import RelatedQnAPageResponse
-from .types.seo_summary_page_response import SeoSummaryPageResponse
-from .types.smart_gpt_page_response import SmartGptPageResponse
-from .types.social_lookup_email_page_response import SocialLookupEmailPageResponse
-from .types.text2audio_page_response import Text2AudioPageResponse
-from .types.text_to_speech_page_response import TextToSpeechPageResponse
-from .types.translation_page_response import TranslationPageResponse
-from .types.video_bots_page_response import VideoBotsPageResponse
+from .types.image_from_email_request_selected_model import ImageFromEmailRequestSelectedModel
+from .types.image_from_web_search_request_selected_model import ImageFromWebSearchRequestSelectedModel
+from .types.image_segmentation_page_status_response import ImageSegmentationPageStatusResponse
+from .types.img2img_page_status_response import Img2ImgPageStatusResponse
+from .types.lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse
+from .types.lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel
+from .types.lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName
+from .types.lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel
+from .types.lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider
+from .types.llm_request_response_format_type import LlmRequestResponseFormatType
+from .types.llm_request_selected_models_item import LlmRequestSelectedModelsItem
+from .types.object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse
+from .types.personalize_email_request_response_format_type import PersonalizeEmailRequestResponseFormatType
+from .types.personalize_email_request_selected_model import PersonalizeEmailRequestSelectedModel
+from .types.portrait_request_selected_model import PortraitRequestSelectedModel
+from .types.product_image_request_selected_model import ProductImageRequestSelectedModel
+from .types.qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse
+from .types.qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem
+from .types.qr_code_request_scheduler import QrCodeRequestScheduler
+from .types.qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem
+from .types.qr_code_request_selected_model import QrCodeRequestSelectedModel
+from .types.rag_request_citation_style import RagRequestCitationStyle
+from .types.rag_request_embedding_model import RagRequestEmbeddingModel
+from .types.rag_request_keyword_query import RagRequestKeywordQuery
+from .types.rag_request_response_format_type import RagRequestResponseFormatType
+from .types.rag_request_selected_model import RagRequestSelectedModel
+from .types.recipe_function import RecipeFunction
+from .types.related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse
+from .types.related_qn_a_page_status_response import RelatedQnAPageStatusResponse
+from .types.remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel
+from .types.remix_image_request_selected_model import RemixImageRequestSelectedModel
+from .types.remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel
+from .types.run_settings import RunSettings
+from .types.sad_talker_settings import SadTalkerSettings
+from .types.seo_content_request_response_format_type import SeoContentRequestResponseFormatType
+from .types.seo_content_request_selected_model import SeoContentRequestSelectedModel
+from .types.seo_people_also_ask_doc_request_citation_style import SeoPeopleAlsoAskDocRequestCitationStyle
+from .types.seo_people_also_ask_doc_request_embedding_model import SeoPeopleAlsoAskDocRequestEmbeddingModel
+from .types.seo_people_also_ask_doc_request_keyword_query import SeoPeopleAlsoAskDocRequestKeywordQuery
+from .types.seo_people_also_ask_doc_request_response_format_type import SeoPeopleAlsoAskDocRequestResponseFormatType
+from .types.seo_people_also_ask_doc_request_selected_model import SeoPeopleAlsoAskDocRequestSelectedModel
+from .types.seo_people_also_ask_request_embedding_model import SeoPeopleAlsoAskRequestEmbeddingModel
+from .types.seo_people_also_ask_request_response_format_type import SeoPeopleAlsoAskRequestResponseFormatType
+from .types.seo_people_also_ask_request_selected_model import SeoPeopleAlsoAskRequestSelectedModel
+from .types.seo_summary_page_status_response import SeoSummaryPageStatusResponse
+from .types.serp_search_location import SerpSearchLocation
+from .types.serp_search_type import SerpSearchType
+from .types.social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse
+from .types.speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat
+from .types.speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel
+from .types.speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel
+from .types.synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType
+from .types.synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel
+from .types.synthesize_data_request_selected_model import SynthesizeDataRequestSelectedModel
+from .types.text2audio_page_status_response import Text2AudioPageStatusResponse
+from .types.text_to_image_request_scheduler import TextToImageRequestScheduler
+from .types.text_to_image_request_selected_models_item import TextToImageRequestSelectedModelsItem
+from .types.text_to_speech_page_status_response import TextToSpeechPageStatusResponse
+from .types.text_to_speech_request_openai_tts_model import TextToSpeechRequestOpenaiTtsModel
+from .types.text_to_speech_request_openai_voice_name import TextToSpeechRequestOpenaiVoiceName
+from .types.text_to_speech_request_tts_provider import TextToSpeechRequestTtsProvider
+from .types.translate_request_selected_model import TranslateRequestSelectedModel
+from .types.translation_page_status_response import TranslationPageStatusResponse
+from .types.upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem
+from .types.vcard import Vcard
+from .types.web_search_llm_request_embedding_model import WebSearchLlmRequestEmbeddingModel
+from .types.web_search_llm_request_response_format_type import WebSearchLlmRequestResponseFormatType
+from .types.web_search_llm_request_selected_model import WebSearchLlmRequestSelectedModel
+from .web_search_gpt3.client import AsyncWebSearchGpt3Client, WebSearchGpt3Client
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
class Gooey:
@@ -151,45 +236,165 @@ def __init__(
)
self.copilot_integrations = CopilotIntegrationsClient(client_wrapper=self._client_wrapper)
self.copilot_for_your_enterprise = CopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper)
+ self.ai_animation_generator = AiAnimationGeneratorClient(client_wrapper=self._client_wrapper)
+ self.ai_art_qr_code = AiArtQrCodeClient(client_wrapper=self._client_wrapper)
+ self.generate_people_also_ask_seo_content = GeneratePeopleAlsoAskSeoContentClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.create_a_perfect_seo_optimized_title_paragraph = CreateAPerfectSeoOptimizedTitleParagraphClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.web_search_gpt3 = WebSearchGpt3Client(client_wrapper=self._client_wrapper)
+ self.profile_lookup_gpt3for_ai_personalized_emails = ProfileLookupGpt3ForAiPersonalizedEmailsClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.bulk_runner = BulkRunnerClient(client_wrapper=self._client_wrapper)
self.evaluator = EvaluatorClient(client_wrapper=self._client_wrapper)
+ self.synthetic_data_maker_for_videos_pd_fs = SyntheticDataMakerForVideosPdFsClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.large_language_models_gpt3 = LargeLanguageModelsGpt3Client(client_wrapper=self._client_wrapper)
+ self.search_your_docs_with_gpt = SearchYourDocsWithGptClient(client_wrapper=self._client_wrapper)
self.smart_gpt = SmartGptClient(client_wrapper=self._client_wrapper)
+ self.summarize_your_docs_with_gpt = SummarizeYourDocsWithGptClient(client_wrapper=self._client_wrapper)
self.functions = FunctionsClient(client_wrapper=self._client_wrapper)
self.lip_syncing = LipSyncingClient(client_wrapper=self._client_wrapper)
- self.misc = MiscClient(client_wrapper=self._client_wrapper)
- self.bulk_runner = BulkRunnerClient(client_wrapper=self._client_wrapper)
+ self.lipsync_video_with_any_text = LipsyncVideoWithAnyTextClient(client_wrapper=self._client_wrapper)
+ self.compare_ai_voice_generators = CompareAiVoiceGeneratorsClient(client_wrapper=self._client_wrapper)
+ self.speech_recognition_translation = SpeechRecognitionTranslationClient(client_wrapper=self._client_wrapper)
+ self.text_guided_audio_generator = TextGuidedAudioGeneratorClient(client_wrapper=self._client_wrapper)
+ self.compare_ai_translations = CompareAiTranslationsClient(client_wrapper=self._client_wrapper)
+ self.edit_an_image_with_ai_prompt = EditAnImageWithAiPromptClient(client_wrapper=self._client_wrapper)
+ self.compare_ai_image_generators = CompareAiImageGeneratorsClient(client_wrapper=self._client_wrapper)
+ self.generate_product_photo_backgrounds = GenerateProductPhotoBackgroundsClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.ai_image_with_a_face = AiImageWithAFaceClient(client_wrapper=self._client_wrapper)
+ self.ai_generated_photo_from_email_profile_lookup = AiGeneratedPhotoFromEmailProfileLookupClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.render_image_search_results_with_ai = RenderImageSearchResultsWithAiClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.ai_background_changer = AiBackgroundChangerClient(client_wrapper=self._client_wrapper)
+ self.compare_ai_image_upscalers = CompareAiImageUpscalersClient(client_wrapper=self._client_wrapper)
+ self.chyron_plant_bot = ChyronPlantBotClient(client_wrapper=self._client_wrapper)
+ self.letter_writer = LetterWriterClient(client_wrapper=self._client_wrapper)
self.embeddings = EmbeddingsClient(client_wrapper=self._client_wrapper)
+ self.people_also_ask_answers_from_a_doc = PeopleAlsoAskAnswersFromADocClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.misc = MiscClient(client_wrapper=self._client_wrapper)
def animate(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormDeforumSd:
+ self,
+ *,
+ animation_prompts: typing.List[AnimationPrompt],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ max_frames: typing.Optional[int] = None,
+ selected_model: typing.Optional[AnimateRequestSelectedModel] = None,
+ animation_mode: typing.Optional[str] = None,
+ zoom: typing.Optional[str] = None,
+ translation_x: typing.Optional[str] = None,
+ translation_y: typing.Optional[str] = None,
+ rotation3d_x: typing.Optional[str] = None,
+ rotation3d_y: typing.Optional[str] = None,
+ rotation3d_z: typing.Optional[str] = None,
+ fps: typing.Optional[int] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> DeforumSdPageStatusResponse:
"""
Parameters
----------
+ animation_prompts : typing.List[AnimationPrompt]
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ max_frames : typing.Optional[int]
+
+ selected_model : typing.Optional[AnimateRequestSelectedModel]
+
+ animation_mode : typing.Optional[str]
+
+ zoom : typing.Optional[str]
+
+ translation_x : typing.Optional[str]
+
+ translation_y : typing.Optional[str]
+
+ rotation3d_x : typing.Optional[str]
+
+ rotation3d_y : typing.Optional[str]
+
+ rotation3d_z : typing.Optional[str]
+
+ fps : typing.Optional[int]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormDeforumSd
+ DeforumSdPageStatusResponse
Successful Response
Examples
--------
- from gooey import Gooey
+ from gooey import AnimationPrompt, Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.animate()
+ client.animate(
+ animation_prompts=[
+ AnimationPrompt(
+ frame="frame",
+ prompt="prompt",
+ )
+ ],
+ )
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/DeforumSD/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/DeforumSD/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "animation_prompts": animation_prompts,
+ "max_frames": max_frames,
+ "selected_model": selected_model,
+ "animation_mode": animation_mode,
+ "zoom": zoom,
+ "translation_x": translation_x,
+ "translation_y": translation_y,
+ "rotation_3d_x": rotation3d_x,
+ "rotation_3d_y": rotation3d_y,
+ "rotation_3d_z": rotation3d_z,
+ "fps": fps,
+ "seed": seed,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormDeforumSd, parse_obj_as(type_=BodyAsyncFormDeforumSd, object_=_response.json())) # type: ignore
+ return typing.cast(DeforumSdPageStatusResponse, parse_obj_as(type_=DeforumSdPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -216,19 +421,112 @@ def animate(
raise ApiError(status_code=_response.status_code, body=_response_json)
def qr_code(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormArtQrCode:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ qr_code_data: typing.Optional[str] = None,
+ qr_code_input_image: typing.Optional[str] = None,
+ qr_code_vcard: typing.Optional[Vcard] = None,
+ qr_code_file: typing.Optional[str] = None,
+ use_url_shortener: typing.Optional[bool] = None,
+ negative_prompt: typing.Optional[str] = None,
+ image_prompt: typing.Optional[str] = None,
+ image_prompt_controlnet_models: typing.Optional[
+ typing.List[QrCodeRequestImagePromptControlnetModelsItem]
+ ] = None,
+ image_prompt_strength: typing.Optional[float] = None,
+ image_prompt_scale: typing.Optional[float] = None,
+ image_prompt_pos_x: typing.Optional[float] = None,
+ image_prompt_pos_y: typing.Optional[float] = None,
+ selected_model: typing.Optional[QrCodeRequestSelectedModel] = None,
+ selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ scheduler: typing.Optional[QrCodeRequestScheduler] = None,
+ seed: typing.Optional[int] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> QrCodeGeneratorPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ qr_code_data : typing.Optional[str]
+
+ qr_code_input_image : typing.Optional[str]
+
+ qr_code_vcard : typing.Optional[Vcard]
+
+ qr_code_file : typing.Optional[str]
+
+ use_url_shortener : typing.Optional[bool]
+
+ negative_prompt : typing.Optional[str]
+
+ image_prompt : typing.Optional[str]
+
+ image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]
+
+ image_prompt_strength : typing.Optional[float]
+
+ image_prompt_scale : typing.Optional[float]
+
+ image_prompt_pos_x : typing.Optional[float]
+
+ image_prompt_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[QrCodeRequestSelectedModel]
+
+ selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ controlnet_conditioning_scale : typing.Optional[typing.List[float]]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ scheduler : typing.Optional[QrCodeRequestScheduler]
+
+ seed : typing.Optional[int]
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormArtQrCode
+ QrCodeGeneratorPageStatusResponse
Successful Response
Examples
@@ -238,17 +536,52 @@ def qr_code(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.qr_code()
+ client.qr_code(
+ text_prompt="text_prompt",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/art-qr-code/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "qr_code_data": qr_code_data,
+ "qr_code_input_image": qr_code_input_image,
+ "qr_code_vcard": qr_code_vcard,
+ "qr_code_file": qr_code_file,
+ "use_url_shortener": use_url_shortener,
+ "text_prompt": text_prompt,
+ "negative_prompt": negative_prompt,
+ "image_prompt": image_prompt,
+ "image_prompt_controlnet_models": image_prompt_controlnet_models,
+ "image_prompt_strength": image_prompt_strength,
+ "image_prompt_scale": image_prompt_scale,
+ "image_prompt_pos_x": image_prompt_pos_x,
+ "image_prompt_pos_y": image_prompt_pos_y,
+ "selected_model": selected_model,
+ "selected_controlnet_model": selected_controlnet_model,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "controlnet_conditioning_scale": controlnet_conditioning_scale,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "scheduler": scheduler,
+ "seed": seed,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormArtQrCode, parse_obj_as(type_=BodyAsyncFormArtQrCode, object_=_response.json())) # type: ignore
+ return typing.cast(QrCodeGeneratorPageStatusResponse, parse_obj_as(type_=QrCodeGeneratorPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -275,19 +608,101 @@ def qr_code(
raise ApiError(status_code=_response.status_code, body=_response_json)
def seo_people_also_ask(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormRelatedQnaMaker:
+ self,
+ *,
+ search_query: str,
+ site_filter: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[SeoPeopleAlsoAskRequestSelectedModel] = None,
+ max_search_urls: typing.Optional[int] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ embedding_model: typing.Optional[SeoPeopleAlsoAskRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[SeoPeopleAlsoAskRequestResponseFormatType] = None,
+ serp_search_location: typing.Optional[SerpSearchLocation] = None,
+ scaleserp_locations: typing.Optional[typing.List[str]] = None,
+ serp_search_type: typing.Optional[SerpSearchType] = None,
+ scaleserp_search_field: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnAPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
+ site_filter : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[SeoPeopleAlsoAskRequestSelectedModel]
+
+ max_search_urls : typing.Optional[int]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ embedding_model : typing.Optional[SeoPeopleAlsoAskRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[SeoPeopleAlsoAskRequestResponseFormatType]
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.List[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ serp_search_type : typing.Optional[SerpSearchType]
+
+ scaleserp_search_field : typing.Optional[str]
+ DEPRECATED: use `serp_search_type` instead
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormRelatedQnaMaker
+ RelatedQnAPageStatusResponse
Successful Response
Examples
@@ -297,17 +712,48 @@ def seo_people_also_ask(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.seo_people_also_ask()
+ client.seo_people_also_ask(
+ search_query="search_query",
+ site_filter="site_filter",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/related-qna-maker/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "search_query": search_query,
+ "site_filter": site_filter,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "selected_model": selected_model,
+ "max_search_urls": max_search_urls,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "serp_search_type": serp_search_type,
+ "scaleserp_search_field": scaleserp_search_field,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormRelatedQnaMaker, parse_obj_as(type_=BodyAsyncFormRelatedQnaMaker, object_=_response.json())) # type: ignore
+ return typing.cast(RelatedQnAPageStatusResponse, parse_obj_as(type_=RelatedQnAPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -334,19 +780,87 @@ def seo_people_also_ask(
raise ApiError(status_code=_response.status_code, body=_response_json)
def seo_content(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormSeoSummary:
+ self,
+ *,
+ search_query: str,
+ keywords: str,
+ title: str,
+ company_url: str,
+ example_id: typing.Optional[str] = None,
+ task_instructions: typing.Optional[str] = None,
+ enable_html: typing.Optional[bool] = None,
+ selected_model: typing.Optional[SeoContentRequestSelectedModel] = None,
+ max_search_urls: typing.Optional[int] = None,
+ enable_crosslinks: typing.Optional[bool] = None,
+ seed: typing.Optional[int] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[SeoContentRequestResponseFormatType] = None,
+ serp_search_location: typing.Optional[SerpSearchLocation] = None,
+ scaleserp_locations: typing.Optional[typing.List[str]] = None,
+ serp_search_type: typing.Optional[SerpSearchType] = None,
+ scaleserp_search_field: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> SeoSummaryPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
+ keywords : str
+
+ title : str
+
+ company_url : str
+
example_id : typing.Optional[str]
+ task_instructions : typing.Optional[str]
+
+ enable_html : typing.Optional[bool]
+
+ selected_model : typing.Optional[SeoContentRequestSelectedModel]
+
+ max_search_urls : typing.Optional[int]
+
+ enable_crosslinks : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[SeoContentRequestResponseFormatType]
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.List[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ serp_search_type : typing.Optional[SerpSearchType]
+
+ scaleserp_search_field : typing.Optional[str]
+ DEPRECATED: use `serp_search_type` instead
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormSeoSummary
+ SeoSummaryPageStatusResponse
Successful Response
Examples
@@ -356,17 +870,47 @@ def seo_content(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.seo_content()
+ client.seo_content(
+ search_query="search_query",
+ keywords="keywords",
+ title="title",
+ company_url="company_url",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/SEOSummary/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "search_query": search_query,
+ "keywords": keywords,
+ "title": title,
+ "company_url": company_url,
+ "task_instructions": task_instructions,
+ "enable_html": enable_html,
+ "selected_model": selected_model,
+ "max_search_urls": max_search_urls,
+ "enable_crosslinks": enable_crosslinks,
+ "seed": seed,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "serp_search_type": serp_search_type,
+ "scaleserp_search_field": scaleserp_search_field,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormSeoSummary, parse_obj_as(type_=BodyAsyncFormSeoSummary, object_=_response.json())) # type: ignore
+ return typing.cast(SeoSummaryPageStatusResponse, parse_obj_as(type_=SeoSummaryPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -393,19 +937,101 @@ def seo_content(
raise ApiError(status_code=_response.status_code, body=_response_json)
def web_search_llm(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormGoogleGpt:
+ self,
+ *,
+ search_query: str,
+ site_filter: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[WebSearchLlmRequestSelectedModel] = None,
+ max_search_urls: typing.Optional[int] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ embedding_model: typing.Optional[WebSearchLlmRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[WebSearchLlmRequestResponseFormatType] = None,
+ serp_search_location: typing.Optional[SerpSearchLocation] = None,
+ scaleserp_locations: typing.Optional[typing.List[str]] = None,
+ serp_search_type: typing.Optional[SerpSearchType] = None,
+ scaleserp_search_field: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleGptPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
+ site_filter : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[WebSearchLlmRequestSelectedModel]
+
+ max_search_urls : typing.Optional[int]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ embedding_model : typing.Optional[WebSearchLlmRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[WebSearchLlmRequestResponseFormatType]
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.List[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ serp_search_type : typing.Optional[SerpSearchType]
+
+ scaleserp_search_field : typing.Optional[str]
+ DEPRECATED: use `serp_search_type` instead
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormGoogleGpt
+ GoogleGptPageStatusResponse
Successful Response
Examples
@@ -415,17 +1041,48 @@ def web_search_llm(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.web_search_llm()
+ client.web_search_llm(
+ search_query="search_query",
+ site_filter="site_filter",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/google-gpt/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "search_query": search_query,
+ "site_filter": site_filter,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "selected_model": selected_model,
+ "max_search_urls": max_search_urls,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "serp_search_type": serp_search_type,
+ "scaleserp_search_field": scaleserp_search_field,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormGoogleGpt, parse_obj_as(type_=BodyAsyncFormGoogleGpt, object_=_response.json())) # type: ignore
+ return typing.cast(GoogleGptPageStatusResponse, parse_obj_as(type_=GoogleGptPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -452,19 +1109,59 @@ def web_search_llm(
raise ApiError(status_code=_response.status_code, body=_response_json)
def personalize_email(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormSocialLookupEmail:
+ self,
+ *,
+ email_address: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_prompt: typing.Optional[str] = None,
+ selected_model: typing.Optional[PersonalizeEmailRequestSelectedModel] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[PersonalizeEmailRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> SocialLookupEmailPageStatusResponse:
"""
Parameters
----------
+ email_address : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ selected_model : typing.Optional[PersonalizeEmailRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[PersonalizeEmailRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormSocialLookupEmail
+ SocialLookupEmailPageStatusResponse
Successful Response
Examples
@@ -474,17 +1171,35 @@ def personalize_email(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.personalize_email()
+ client.personalize_email(
+ email_address="email_address",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/SocialLookupEmail/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "email_address": email_address,
+ "input_prompt": input_prompt,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormSocialLookupEmail, parse_obj_as(type_=BodyAsyncFormSocialLookupEmail, object_=_response.json())) # type: ignore
+ return typing.cast(SocialLookupEmailPageStatusResponse, parse_obj_as(type_=SocialLookupEmailPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -511,19 +1226,65 @@ def personalize_email(
raise ApiError(status_code=_response.status_code, body=_response_json)
def bulk_run(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormBulkRunner:
+ self,
+ *,
+ documents: typing.List[str],
+ run_urls: typing.List[str],
+ input_columns: typing.Dict[str, str],
+ output_columns: typing.Dict[str, str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ eval_urls: typing.Optional[typing.List[str]] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkRunnerPageStatusResponse:
"""
Parameters
----------
+ documents : typing.List[str]
+
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions; for Art QR Code, it would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+
+ run_urls : typing.List[str]
+
+ Provide one or more Gooey.AI workflow runs.
+ You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+
+
+ input_columns : typing.Dict[str, str]
+
+ For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+
+
+ output_columns : typing.Dict[str, str]
+
+ For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ eval_urls : typing.Optional[typing.List[str]]
+
+ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormBulkRunner
+ BulkRunnerPageStatusResponse
Successful Response
Examples
@@ -533,17 +1294,34 @@ def bulk_run(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.bulk_run()
+ client.bulk_run(
+ documents=["documents"],
+ run_urls=["run_urls"],
+ input_columns={"key": "value"},
+ output_columns={"key": "value"},
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/bulk-runner/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "run_urls": run_urls,
+ "input_columns": input_columns,
+ "output_columns": output_columns,
+ "eval_urls": eval_urls,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormBulkRunner, parse_obj_as(type_=BodyAsyncFormBulkRunner, object_=_response.json())) # type: ignore
+ return typing.cast(BulkRunnerPageStatusResponse, parse_obj_as(type_=BulkRunnerPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -570,19 +1348,73 @@ def bulk_run(
raise ApiError(status_code=_response.status_code, body=_response_json)
def synthesize_data(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormDocExtract:
+ self,
+ *,
+ documents: typing.List[str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ sheet_url: typing.Optional[str] = None,
+ selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None,
+ google_translate_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[str] = None,
+ task_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[SynthesizeDataRequestSelectedModel] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> DocExtractPageStatusResponse:
"""
Parameters
----------
+ documents : typing.List[str]
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ sheet_url : typing.Optional[str]
+
+ selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel]
+
+ google_translate_target : typing.Optional[str]
+
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+ task_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[SynthesizeDataRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormDocExtract
+ DocExtractPageStatusResponse
Successful Response
Examples
@@ -592,17 +1424,39 @@ def synthesize_data(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.synthesize_data()
+ client.synthesize_data(
+ documents=["documents"],
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/doc-extract/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "sheet_url": sheet_url,
+ "selected_asr_model": selected_asr_model,
+ "google_translate_target": google_translate_target,
+ "glossary_document": glossary_document,
+ "task_instructions": task_instructions,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormDocExtract, parse_obj_as(type_=BodyAsyncFormDocExtract, object_=_response.json())) # type: ignore
+ return typing.cast(DocExtractPageStatusResponse, parse_obj_as(type_=DocExtractPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -629,19 +1483,56 @@ def synthesize_data(
raise ApiError(status_code=_response.status_code, body=_response_json)
def llm(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormCompareLlm:
+ self,
+ *,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_prompt: typing.Optional[str] = None,
+ selected_models: typing.Optional[typing.List[LlmRequestSelectedModelsItem]] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[LlmRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareLlmPageStatusResponse:
"""
Parameters
----------
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ selected_models : typing.Optional[typing.List[LlmRequestSelectedModelsItem]]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[LlmRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormCompareLlm
+ CompareLlmPageStatusResponse
Successful Response
Examples
@@ -657,11 +1548,26 @@ def llm(
"v3/CompareLLM/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "selected_models": selected_models,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormCompareLlm, parse_obj_as(type_=BodyAsyncFormCompareLlm, object_=_response.json())) # type: ignore
+ return typing.cast(CompareLlmPageStatusResponse, parse_obj_as(type_=CompareLlmPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -688,19 +1594,93 @@ def llm(
raise ApiError(status_code=_response.status_code, body=_response_json)
def rag(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormDocSearch:
+ self,
+ *,
+ search_query: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ keyword_query: typing.Optional[RagRequestKeywordQuery] = None,
+ documents: typing.Optional[typing.List[str]] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ doc_extract_url: typing.Optional[str] = None,
+ embedding_model: typing.Optional[RagRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[RagRequestSelectedModel] = None,
+ citation_style: typing.Optional[RagRequestCitationStyle] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[RagRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSearchPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ keyword_query : typing.Optional[RagRequestKeywordQuery]
+
+ documents : typing.Optional[typing.List[str]]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ doc_extract_url : typing.Optional[str]
+
+ embedding_model : typing.Optional[RagRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[RagRequestSelectedModel]
+
+ citation_style : typing.Optional[RagRequestCitationStyle]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[RagRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormDocSearch
+ DocSearchPageStatusResponse
Successful Response
Examples
@@ -710,17 +1690,45 @@ def rag(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.rag()
+ client.rag(
+ search_query="search_query",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/doc-search/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "search_query": search_query,
+ "keyword_query": keyword_query,
+ "documents": documents,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "doc_extract_url": doc_extract_url,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "selected_model": selected_model,
+ "citation_style": citation_style,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormDocSearch, parse_obj_as(type_=BodyAsyncFormDocSearch, object_=_response.json())) # type: ignore
+ return typing.cast(DocSearchPageStatusResponse, parse_obj_as(type_=DocSearchPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -747,19 +1755,71 @@ def rag(
raise ApiError(status_code=_response.status_code, body=_response_json)
def doc_summary(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormDocSummary:
+ self,
+ *,
+ documents: typing.List[str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ task_instructions: typing.Optional[str] = None,
+ merge_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[DocSummaryRequestSelectedModel] = None,
+ chain_type: typing.Optional[typing.Literal["map_reduce"]] = None,
+ selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None,
+ google_translate_target: typing.Optional[str] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSummaryPageStatusResponse:
"""
Parameters
----------
+ documents : typing.List[str]
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ task_instructions : typing.Optional[str]
+
+ merge_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[DocSummaryRequestSelectedModel]
+
+ chain_type : typing.Optional[typing.Literal["map_reduce"]]
+
+ selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel]
+
+ google_translate_target : typing.Optional[str]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[DocSummaryRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormDocSummary
+ DocSummaryPageStatusResponse
Successful Response
Examples
@@ -769,17 +1829,39 @@ def doc_summary(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.doc_summary()
+ client.doc_summary(
+ documents=["documents"],
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/doc-summary/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "task_instructions": task_instructions,
+ "merge_instructions": merge_instructions,
+ "selected_model": selected_model,
+ "chain_type": chain_type,
+ "selected_asr_model": selected_asr_model,
+ "google_translate_target": google_translate_target,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormDocSummary, parse_obj_as(type_=BodyAsyncFormDocSummary, object_=_response.json())) # type: ignore
+ return typing.cast(DocSummaryPageStatusResponse, parse_obj_as(type_=DocSummaryPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -806,19 +1888,111 @@ def doc_summary(
raise ApiError(status_code=_response.status_code, body=_response_json)
def lipsync_tts(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormLipsyncTts:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None,
+ input_face: typing.Optional[str] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> LipsyncTtsPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider]
+
+ uberduck_voice_name : typing.Optional[str]
+
+ uberduck_speaking_rate : typing.Optional[float]
+
+ google_voice_name : typing.Optional[str]
+
+ google_speaking_rate : typing.Optional[float]
+
+ google_pitch : typing.Optional[float]
+
+ bark_history_prompt : typing.Optional[str]
+
+ elevenlabs_voice_name : typing.Optional[str]
+ Use `elevenlabs_voice_id` instead
+
+ elevenlabs_api_key : typing.Optional[str]
+
+ elevenlabs_voice_id : typing.Optional[str]
+
+ elevenlabs_model : typing.Optional[str]
+
+ elevenlabs_stability : typing.Optional[float]
+
+ elevenlabs_similarity_boost : typing.Optional[float]
+
+ elevenlabs_style : typing.Optional[float]
+
+ elevenlabs_speaker_boost : typing.Optional[bool]
+
+ azure_voice_name : typing.Optional[str]
+
+ openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName]
+
+ openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel]
+
+ input_face : typing.Optional[str]
+
+ face_padding_top : typing.Optional[int]
+
+ face_padding_bottom : typing.Optional[int]
+
+ face_padding_left : typing.Optional[int]
+
+ face_padding_right : typing.Optional[int]
+
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+ selected_model : typing.Optional[LipsyncTtsRequestSelectedModel]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormLipsyncTts
+ LipsyncTtsPageStatusResponse
Successful Response
Examples
@@ -828,17 +2002,52 @@ def lipsync_tts(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.lipsync_tts()
+ client.lipsync_tts(
+ text_prompt="text_prompt",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/LipsyncTTS/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "text_prompt": text_prompt,
+ "tts_provider": tts_provider,
+ "uberduck_voice_name": uberduck_voice_name,
+ "uberduck_speaking_rate": uberduck_speaking_rate,
+ "google_voice_name": google_voice_name,
+ "google_speaking_rate": google_speaking_rate,
+ "google_pitch": google_pitch,
+ "bark_history_prompt": bark_history_prompt,
+ "elevenlabs_voice_name": elevenlabs_voice_name,
+ "elevenlabs_api_key": elevenlabs_api_key,
+ "elevenlabs_voice_id": elevenlabs_voice_id,
+ "elevenlabs_model": elevenlabs_model,
+ "elevenlabs_stability": elevenlabs_stability,
+ "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+ "elevenlabs_style": elevenlabs_style,
+ "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+ "azure_voice_name": azure_voice_name,
+ "openai_voice_name": openai_voice_name,
+ "openai_tts_model": openai_tts_model,
+ "input_face": input_face,
+ "face_padding_top": face_padding_top,
+ "face_padding_bottom": face_padding_bottom,
+ "face_padding_left": face_padding_left,
+ "face_padding_right": face_padding_right,
+ "sadtalker_settings": sadtalker_settings,
+ "selected_model": selected_model,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormLipsyncTts, parse_obj_as(type_=BodyAsyncFormLipsyncTts, object_=_response.json())) # type: ignore
+ return typing.cast(LipsyncTtsPageStatusResponse, parse_obj_as(type_=LipsyncTtsPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -865,19 +2074,90 @@ def lipsync_tts(
raise ApiError(status_code=_response.status_code, body=_response_json)
def text_to_speech(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormTextToSpeech:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ tts_provider: typing.Optional[TextToSpeechRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[TextToSpeechRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[TextToSpeechRequestOpenaiTtsModel] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> TextToSpeechPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ tts_provider : typing.Optional[TextToSpeechRequestTtsProvider]
+
+ uberduck_voice_name : typing.Optional[str]
+
+ uberduck_speaking_rate : typing.Optional[float]
+
+ google_voice_name : typing.Optional[str]
+
+ google_speaking_rate : typing.Optional[float]
+
+ google_pitch : typing.Optional[float]
+
+ bark_history_prompt : typing.Optional[str]
+
+ elevenlabs_voice_name : typing.Optional[str]
+ Use `elevenlabs_voice_id` instead
+
+ elevenlabs_api_key : typing.Optional[str]
+
+ elevenlabs_voice_id : typing.Optional[str]
+
+ elevenlabs_model : typing.Optional[str]
+
+ elevenlabs_stability : typing.Optional[float]
+
+ elevenlabs_similarity_boost : typing.Optional[float]
+
+ elevenlabs_style : typing.Optional[float]
+
+ elevenlabs_speaker_boost : typing.Optional[bool]
+
+ azure_voice_name : typing.Optional[str]
+
+ openai_voice_name : typing.Optional[TextToSpeechRequestOpenaiVoiceName]
+
+ openai_tts_model : typing.Optional[TextToSpeechRequestOpenaiTtsModel]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormTextToSpeech
+ TextToSpeechPageStatusResponse
Successful Response
Examples
@@ -887,17 +2167,45 @@ def text_to_speech(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.text_to_speech()
+ client.text_to_speech(
+ text_prompt="text_prompt",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/TextToSpeech/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "text_prompt": text_prompt,
+ "tts_provider": tts_provider,
+ "uberduck_voice_name": uberduck_voice_name,
+ "uberduck_speaking_rate": uberduck_speaking_rate,
+ "google_voice_name": google_voice_name,
+ "google_speaking_rate": google_speaking_rate,
+ "google_pitch": google_pitch,
+ "bark_history_prompt": bark_history_prompt,
+ "elevenlabs_voice_name": elevenlabs_voice_name,
+ "elevenlabs_api_key": elevenlabs_api_key,
+ "elevenlabs_voice_id": elevenlabs_voice_id,
+ "elevenlabs_model": elevenlabs_model,
+ "elevenlabs_stability": elevenlabs_stability,
+ "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+ "elevenlabs_style": elevenlabs_style,
+ "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+ "azure_voice_name": azure_voice_name,
+ "openai_voice_name": openai_voice_name,
+ "openai_tts_model": openai_tts_model,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormTextToSpeech, parse_obj_as(type_=BodyAsyncFormTextToSpeech, object_=_response.json())) # type: ignore
+ return typing.cast(TextToSpeechPageStatusResponse, parse_obj_as(type_=TextToSpeechPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -924,19 +2232,62 @@ def text_to_speech(
raise ApiError(status_code=_response.status_code, body=_response_json)
def speech_recognition(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormAsr:
+ self,
+ *,
+ documents: typing.List[str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None,
+ language: typing.Optional[str] = None,
+ translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None,
+ output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None,
+ google_translate_target: typing.Optional[str] = None,
+ translation_source: typing.Optional[str] = None,
+ translation_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsrPageStatusResponse:
"""
Parameters
----------
+ documents : typing.List[str]
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel]
+
+ language : typing.Optional[str]
+
+ translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel]
+
+ output_format : typing.Optional[SpeechRecognitionRequestOutputFormat]
+
+ google_translate_target : typing.Optional[str]
+ use `translation_model` & `translation_target` instead.
+
+ translation_source : typing.Optional[str]
+
+ translation_target : typing.Optional[str]
+
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormAsr
+ AsrPageStatusResponse
Successful Response
Examples
@@ -946,14 +2297,35 @@ def speech_recognition(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.speech_recognition()
+ client.speech_recognition(
+ documents=["documents"],
+ )
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/asr/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/asr/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "selected_model": selected_model,
+ "language": language,
+ "translation_model": translation_model,
+ "output_format": output_format,
+ "google_translate_target": google_translate_target,
+ "translation_source": translation_source,
+ "translation_target": translation_target,
+ "glossary_document": glossary_document,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormAsr, parse_obj_as(type_=BodyAsyncFormAsr, object_=_response.json())) # type: ignore
+ return typing.cast(AsrPageStatusResponse, parse_obj_as(type_=AsrPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -980,19 +2352,59 @@ def speech_recognition(
raise ApiError(status_code=_response.status_code, body=_response_json)
def text_to_music(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormText2Audio:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ negative_prompt: typing.Optional[str] = None,
+ duration_sec: typing.Optional[float] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ seed: typing.Optional[int] = None,
+ sd2upscaling: typing.Optional[bool] = None,
+ selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> Text2AudioPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ negative_prompt : typing.Optional[str]
+
+ duration_sec : typing.Optional[float]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ seed : typing.Optional[int]
+
+ sd2upscaling : typing.Optional[bool]
+
+ selected_models : typing.Optional[typing.List[typing.Literal["audio_ldm"]]]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormText2Audio
+ Text2AudioPageStatusResponse
Successful Response
Examples
@@ -1002,17 +2414,35 @@ def text_to_music(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.text_to_music()
+ client.text_to_music(
+ text_prompt="text_prompt",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/text2audio/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "text_prompt": text_prompt,
+ "negative_prompt": negative_prompt,
+ "duration_sec": duration_sec,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "guidance_scale": guidance_scale,
+ "seed": seed,
+ "sd_2_upscaling": sd2upscaling,
+ "selected_models": selected_models,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormText2Audio, parse_obj_as(type_=BodyAsyncFormText2Audio, object_=_response.json())) # type: ignore
+ return typing.cast(Text2AudioPageStatusResponse, parse_obj_as(type_=Text2AudioPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1039,19 +2469,49 @@ def text_to_music(
raise ApiError(status_code=_response.status_code, body=_response_json)
def translate(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormTranslate:
+ self,
+ *,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ texts: typing.Optional[typing.List[str]] = None,
+ selected_model: typing.Optional[TranslateRequestSelectedModel] = None,
+ translation_source: typing.Optional[str] = None,
+ translation_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> TranslationPageStatusResponse:
"""
Parameters
----------
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ texts : typing.Optional[typing.List[str]]
+
+ selected_model : typing.Optional[TranslateRequestSelectedModel]
+
+ translation_source : typing.Optional[str]
+
+ translation_target : typing.Optional[str]
+
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormTranslate
+ TranslationPageStatusResponse
Successful Response
Examples
@@ -1064,11 +2524,26 @@ def translate(
client.translate()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/translate/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/translate/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "texts": texts,
+ "selected_model": selected_model,
+ "translation_source": translation_source,
+ "translation_target": translation_target,
+ "glossary_document": glossary_document,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormTranslate, parse_obj_as(type_=BodyAsyncFormTranslate, object_=_response.json())) # type: ignore
+ return typing.cast(TranslationPageStatusResponse, parse_obj_as(type_=TranslationPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1095,19 +2570,74 @@ def translate(
raise ApiError(status_code=_response.status_code, body=_response_json)
def remix_image(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormImg2Img:
+ self,
+ *,
+ input_image: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ text_prompt: typing.Optional[str] = None,
+ selected_model: typing.Optional[RemixImageRequestSelectedModel] = None,
+ selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ prompt_strength: typing.Optional[float] = None,
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
+ seed: typing.Optional[int] = None,
+ image_guidance_scale: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> Img2ImgPageStatusResponse:
"""
Parameters
----------
+ input_image : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ text_prompt : typing.Optional[str]
+
+ selected_model : typing.Optional[RemixImageRequestSelectedModel]
+
+ selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ prompt_strength : typing.Optional[float]
+
+ controlnet_conditioning_scale : typing.Optional[typing.List[float]]
+
+ seed : typing.Optional[int]
+
+ image_guidance_scale : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormImg2Img
+ Img2ImgPageStatusResponse
Successful Response
Examples
@@ -1117,14 +2647,40 @@ def remix_image(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.remix_image()
+ client.remix_image(
+ input_image="input_image",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/Img2Img/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/Img2Img/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "selected_model": selected_model,
+ "selected_controlnet_model": selected_controlnet_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "prompt_strength": prompt_strength,
+ "controlnet_conditioning_scale": controlnet_conditioning_scale,
+ "seed": seed,
+ "image_guidance_scale": image_guidance_scale,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormImg2Img, parse_obj_as(type_=BodyAsyncFormImg2Img, object_=_response.json())) # type: ignore
+ return typing.cast(Img2ImgPageStatusResponse, parse_obj_as(type_=Img2ImgPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1151,19 +2707,77 @@ def remix_image(
raise ApiError(status_code=_response.status_code, body=_response_json)
def text_to_image(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormCompareText2Img:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ negative_prompt: typing.Optional[str] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ dall_e3quality: typing.Optional[str] = None,
+ dall_e3style: typing.Optional[str] = None,
+ guidance_scale: typing.Optional[float] = None,
+ seed: typing.Optional[int] = None,
+ sd2upscaling: typing.Optional[bool] = None,
+ selected_models: typing.Optional[typing.List[TextToImageRequestSelectedModelsItem]] = None,
+ scheduler: typing.Optional[TextToImageRequestScheduler] = None,
+ edit_instruction: typing.Optional[str] = None,
+ image_guidance_scale: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareText2ImgPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ negative_prompt : typing.Optional[str]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ dall_e3quality : typing.Optional[str]
+
+ dall_e3style : typing.Optional[str]
+
+ guidance_scale : typing.Optional[float]
+
+ seed : typing.Optional[int]
+
+ sd2upscaling : typing.Optional[bool]
+
+ selected_models : typing.Optional[typing.List[TextToImageRequestSelectedModelsItem]]
+
+ scheduler : typing.Optional[TextToImageRequestScheduler]
+
+ edit_instruction : typing.Optional[str]
+
+ image_guidance_scale : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormCompareText2Img
+ CompareText2ImgPageStatusResponse
Successful Response
Examples
@@ -1173,17 +2787,41 @@ def text_to_image(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.text_to_image()
+ client.text_to_image(
+ text_prompt="text_prompt",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/CompareText2Img/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "text_prompt": text_prompt,
+ "negative_prompt": negative_prompt,
+ "output_width": output_width,
+ "output_height": output_height,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "dall_e_3_quality": dall_e3quality,
+ "dall_e_3_style": dall_e3style,
+ "guidance_scale": guidance_scale,
+ "seed": seed,
+ "sd_2_upscaling": sd2upscaling,
+ "selected_models": selected_models,
+ "scheduler": scheduler,
+ "edit_instruction": edit_instruction,
+ "image_guidance_scale": image_guidance_scale,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormCompareText2Img, parse_obj_as(type_=BodyAsyncFormCompareText2Img, object_=_response.json())) # type: ignore
+ return typing.cast(CompareText2ImgPageStatusResponse, parse_obj_as(type_=CompareText2ImgPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1210,19 +2848,77 @@ def text_to_image(
raise ApiError(status_code=_response.status_code, body=_response_json)
def product_image(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormObjectInpainting:
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ mask_threshold: typing.Optional[float] = None,
+ selected_model: typing.Optional[ProductImageRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ sd2upscaling: typing.Optional[bool] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> ObjectInpaintingPageStatusResponse:
"""
Parameters
----------
+ input_image : str
+
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ mask_threshold : typing.Optional[float]
+
+ selected_model : typing.Optional[ProductImageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ sd2upscaling : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormObjectInpainting
+ ObjectInpaintingPageStatusResponse
Successful Response
Examples
@@ -1232,17 +2928,42 @@ def product_image(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.product_image()
+ client.product_image(
+ input_image="input_image",
+ text_prompt="text_prompt",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/ObjectInpainting/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "mask_threshold": mask_threshold,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "sd_2_upscaling": sd2upscaling,
+ "seed": seed,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormObjectInpainting, parse_obj_as(type_=BodyAsyncFormObjectInpainting, object_=_response.json())) # type: ignore
+ return typing.cast(ObjectInpaintingPageStatusResponse, parse_obj_as(type_=ObjectInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1269,19 +2990,74 @@ def product_image(
raise ApiError(status_code=_response.status_code, body=_response_json)
def portrait(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormFaceInpainting:
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ face_scale: typing.Optional[float] = None,
+ face_pos_x: typing.Optional[float] = None,
+ face_pos_y: typing.Optional[float] = None,
+ selected_model: typing.Optional[PortraitRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ upscale_factor: typing.Optional[float] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> FaceInpaintingPageStatusResponse:
"""
Parameters
----------
+ input_image : str
+
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ face_scale : typing.Optional[float]
+
+ face_pos_x : typing.Optional[float]
+
+ face_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[PortraitRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ upscale_factor : typing.Optional[float]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormFaceInpainting
+ FaceInpaintingPageStatusResponse
Successful Response
Examples
@@ -1291,17 +3067,41 @@ def portrait(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.portrait()
+ client.portrait(
+ input_image="input_image",
+ text_prompt="text_prompt",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/FaceInpainting/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "face_scale": face_scale,
+ "face_pos_x": face_pos_x,
+ "face_pos_y": face_pos_y,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "upscale_factor": upscale_factor,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "seed": seed,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormFaceInpainting, parse_obj_as(type_=BodyAsyncFormFaceInpainting, object_=_response.json())) # type: ignore
+ return typing.cast(FaceInpaintingPageStatusResponse, parse_obj_as(type_=FaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1328,19 +3128,101 @@ def portrait(
raise ApiError(status_code=_response.status_code, body=_response_json)
def image_from_email(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormEmailFaceInpainting:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ email_address: typing.Optional[str] = None,
+ twitter_handle: typing.Optional[str] = None,
+ face_scale: typing.Optional[float] = None,
+ face_pos_x: typing.Optional[float] = None,
+ face_pos_y: typing.Optional[float] = None,
+ selected_model: typing.Optional[ImageFromEmailRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ upscale_factor: typing.Optional[float] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ should_send_email: typing.Optional[bool] = None,
+ email_from: typing.Optional[str] = None,
+ email_cc: typing.Optional[str] = None,
+ email_bcc: typing.Optional[str] = None,
+ email_subject: typing.Optional[str] = None,
+ email_body: typing.Optional[str] = None,
+ email_body_enable_html: typing.Optional[bool] = None,
+ fallback_email_body: typing.Optional[str] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> EmailFaceInpaintingPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ email_address : typing.Optional[str]
+
+ twitter_handle : typing.Optional[str]
+
+ face_scale : typing.Optional[float]
+
+ face_pos_x : typing.Optional[float]
+
+ face_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[ImageFromEmailRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ upscale_factor : typing.Optional[float]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ should_send_email : typing.Optional[bool]
+
+ email_from : typing.Optional[str]
+
+ email_cc : typing.Optional[str]
+
+ email_bcc : typing.Optional[str]
+
+ email_subject : typing.Optional[str]
+
+ email_body : typing.Optional[str]
+
+ email_body_enable_html : typing.Optional[bool]
+
+ fallback_email_body : typing.Optional[str]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormEmailFaceInpainting
+ EmailFaceInpaintingPageStatusResponse
Successful Response
Examples
@@ -1350,17 +3232,49 @@ def image_from_email(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.image_from_email()
+ client.image_from_email(
+ text_prompt="text_prompt",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/EmailFaceInpainting/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "email_address": email_address,
+ "twitter_handle": twitter_handle,
+ "text_prompt": text_prompt,
+ "face_scale": face_scale,
+ "face_pos_x": face_pos_x,
+ "face_pos_y": face_pos_y,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "upscale_factor": upscale_factor,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "should_send_email": should_send_email,
+ "email_from": email_from,
+ "email_cc": email_cc,
+ "email_bcc": email_bcc,
+ "email_subject": email_subject,
+ "email_body": email_body,
+ "email_body_enable_html": email_body_enable_html,
+ "fallback_email_body": fallback_email_body,
+ "seed": seed,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormEmailFaceInpainting, parse_obj_as(type_=BodyAsyncFormEmailFaceInpainting, object_=_response.json())) # type: ignore
+ return typing.cast(EmailFaceInpaintingPageStatusResponse, parse_obj_as(type_=EmailFaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1387,19 +3301,72 @@ def image_from_email(
raise ApiError(status_code=_response.status_code, body=_response_json)
def image_from_web_search(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormGoogleImageGen:
+ self,
+ *,
+ search_query: str,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ serp_search_location: typing.Optional[SerpSearchLocation] = None,
+ scaleserp_locations: typing.Optional[typing.List[str]] = None,
+ selected_model: typing.Optional[ImageFromWebSearchRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ prompt_strength: typing.Optional[float] = None,
+ sd2upscaling: typing.Optional[bool] = None,
+ seed: typing.Optional[int] = None,
+ image_guidance_scale: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleImageGenPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.List[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ selected_model : typing.Optional[ImageFromWebSearchRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ prompt_strength : typing.Optional[float]
+
+ sd2upscaling : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ image_guidance_scale : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormGoogleImageGen
+ GoogleImageGenPageStatusResponse
Successful Response
Examples
@@ -1409,17 +3376,40 @@ def image_from_web_search(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.image_from_web_search()
+ client.image_from_web_search(
+ search_query="search_query",
+ text_prompt="text_prompt",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/GoogleImageGen/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "search_query": search_query,
+ "text_prompt": text_prompt,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "guidance_scale": guidance_scale,
+ "prompt_strength": prompt_strength,
+ "sd_2_upscaling": sd2upscaling,
+ "seed": seed,
+ "image_guidance_scale": image_guidance_scale,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormGoogleImageGen, parse_obj_as(type_=BodyAsyncFormGoogleImageGen, object_=_response.json())) # type: ignore
+ return typing.cast(GoogleImageGenPageStatusResponse, parse_obj_as(type_=GoogleImageGenPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1446,19 +3436,56 @@ def image_from_web_search(
raise ApiError(status_code=_response.status_code, body=_response_json)
def remove_background(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormImageSegmentation:
+ self,
+ *,
+ input_image: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None,
+ mask_threshold: typing.Optional[float] = None,
+ rect_persepective_transform: typing.Optional[bool] = None,
+ reflection_opacity: typing.Optional[float] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> ImageSegmentationPageStatusResponse:
"""
Parameters
----------
+ input_image : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel]
+
+ mask_threshold : typing.Optional[float]
+
+ rect_persepective_transform : typing.Optional[bool]
+
+ reflection_opacity : typing.Optional[float]
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormImageSegmentation
+ ImageSegmentationPageStatusResponse
Successful Response
Examples
@@ -1468,17 +3495,34 @@ def remove_background(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.remove_background()
+ client.remove_background(
+ input_image="input_image",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/ImageSegmentation/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "selected_model": selected_model,
+ "mask_threshold": mask_threshold,
+ "rect_persepective_transform": rect_persepective_transform,
+ "reflection_opacity": reflection_opacity,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormImageSegmentation, parse_obj_as(type_=BodyAsyncFormImageSegmentation, object_=_response.json())) # type: ignore
+ return typing.cast(ImageSegmentationPageStatusResponse, parse_obj_as(type_=ImageSegmentationPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1505,19 +3549,50 @@ def remove_background(
raise ApiError(status_code=_response.status_code, body=_response_json)
def upscale(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormCompareAiUpscalers:
+ self,
+ *,
+ scale: int,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_image: typing.Optional[str] = None,
+ input_video: typing.Optional[str] = None,
+ selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None,
+ selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareUpscalerPageStatusResponse:
"""
Parameters
----------
+ scale : int
+ The final upsampling scale of the image
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_image : typing.Optional[str]
+ Input Image
+
+ input_video : typing.Optional[str]
+ Input Video
+
+ selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]
+
+ selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormCompareAiUpscalers
+ CompareUpscalerPageStatusResponse
Successful Response
Examples
@@ -1527,17 +3602,31 @@ def upscale(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.upscale()
+ client.upscale(
+ scale=1,
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/compare-ai-upscalers/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "input_video": input_video,
+ "scale": scale,
+ "selected_models": selected_models,
+ "selected_bg_model": selected_bg_model,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormCompareAiUpscalers, parse_obj_as(type_=BodyAsyncFormCompareAiUpscalers, object_=_response.json())) # type: ignore
+ return typing.cast(CompareUpscalerPageStatusResponse, parse_obj_as(type_=CompareUpscalerPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1564,19 +3653,38 @@ def upscale(
raise ApiError(status_code=_response.status_code, body=_response_json)
def embed(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormEmbeddings:
+ self,
+ *,
+ texts: typing.List[str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ selected_model: typing.Optional[EmbedRequestSelectedModel] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> EmbeddingsPageStatusResponse:
"""
Parameters
----------
+ texts : typing.List[str]
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[EmbedRequestSelectedModel]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormEmbeddings
+ EmbeddingsPageStatusResponse
Successful Response
Examples
@@ -1586,17 +3694,28 @@ def embed(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.embed()
+ client.embed(
+ texts=["texts"],
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/embeddings/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "texts": texts,
+ "selected_model": selected_model,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormEmbeddings, parse_obj_as(type_=BodyAsyncFormEmbeddings, object_=_response.json())) # type: ignore
+ return typing.cast(EmbeddingsPageStatusResponse, parse_obj_as(type_=EmbeddingsPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1623,19 +3742,107 @@ def embed(
raise ApiError(status_code=_response.status_code, body=_response_json)
def seo_people_also_ask_doc(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormRelatedQnaMakerDoc:
+ self,
+ *,
+ search_query: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ keyword_query: typing.Optional[SeoPeopleAlsoAskDocRequestKeywordQuery] = None,
+ documents: typing.Optional[typing.List[str]] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ doc_extract_url: typing.Optional[str] = None,
+ embedding_model: typing.Optional[SeoPeopleAlsoAskDocRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[SeoPeopleAlsoAskDocRequestSelectedModel] = None,
+ citation_style: typing.Optional[SeoPeopleAlsoAskDocRequestCitationStyle] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[SeoPeopleAlsoAskDocRequestResponseFormatType] = None,
+ serp_search_location: typing.Optional[SerpSearchLocation] = None,
+ scaleserp_locations: typing.Optional[typing.List[str]] = None,
+ serp_search_type: typing.Optional[SerpSearchType] = None,
+ scaleserp_search_field: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnADocPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ keyword_query : typing.Optional[SeoPeopleAlsoAskDocRequestKeywordQuery]
+
+ documents : typing.Optional[typing.List[str]]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ doc_extract_url : typing.Optional[str]
+
+ embedding_model : typing.Optional[SeoPeopleAlsoAskDocRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[SeoPeopleAlsoAskDocRequestSelectedModel]
+
+ citation_style : typing.Optional[SeoPeopleAlsoAskDocRequestCitationStyle]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[SeoPeopleAlsoAskDocRequestResponseFormatType]
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.List[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ serp_search_type : typing.Optional[SerpSearchType]
+
+ scaleserp_search_field : typing.Optional[str]
+ DEPRECATED: use `serp_search_type` instead
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormRelatedQnaMakerDoc
+ RelatedQnADocPageStatusResponse
Successful Response
Examples
@@ -1645,17 +3852,49 @@ def seo_people_also_ask_doc(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.seo_people_also_ask_doc()
+ client.seo_people_also_ask_doc(
+ search_query="search_query",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/related-qna-maker-doc/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "search_query": search_query,
+ "keyword_query": keyword_query,
+ "documents": documents,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "doc_extract_url": doc_extract_url,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "selected_model": selected_model,
+ "citation_style": citation_style,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "serp_search_type": serp_search_type,
+ "scaleserp_search_field": scaleserp_search_field,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormRelatedQnaMakerDoc, parse_obj_as(type_=BodyAsyncFormRelatedQnaMakerDoc, object_=_response.json())) # type: ignore
+ return typing.cast(RelatedQnADocPageStatusResponse, parse_obj_as(type_=RelatedQnADocPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -1711,9 +3950,7 @@ def health_status_get(self, *, request_options: typing.Optional[RequestOptions]
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3chyron_plant_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> ChyronPlantPageResponse:
+ def post_v3chyron_plant_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -1722,7 +3959,7 @@ def post_v3chyron_plant_async(
Returns
-------
- ChyronPlantPageResponse
+ typing.Any
Successful Response
Examples
@@ -1739,15 +3976,13 @@ def post_v3chyron_plant_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(ChyronPlantPageResponse, parse_obj_as(type_=ChyronPlantPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3compare_llm_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> CompareLlmPageResponse:
+ def post_v3compare_llm_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -1756,7 +3991,7 @@ def post_v3compare_llm_async(
Returns
-------
- CompareLlmPageResponse
+ typing.Any
Successful Response
Examples
@@ -1773,15 +4008,13 @@ def post_v3compare_llm_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareLlmPageResponse, parse_obj_as(type_=CompareLlmPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3compare_text2img_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> CompareText2ImgPageResponse:
+ def post_v3compare_text2img_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -1790,7 +4023,7 @@ def post_v3compare_text2img_async(
Returns
-------
- CompareText2ImgPageResponse
+ typing.Any
Successful Response
Examples
@@ -1807,15 +4040,13 @@ def post_v3compare_text2img_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareText2ImgPageResponse, parse_obj_as(type_=CompareText2ImgPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3deforum_sd_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> DeforumSdPageResponse:
+ def post_v3deforum_sd_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -1824,7 +4055,7 @@ def post_v3deforum_sd_async(
Returns
-------
- DeforumSdPageResponse
+ typing.Any
Successful Response
Examples
@@ -1841,7 +4072,7 @@ def post_v3deforum_sd_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DeforumSdPageResponse, parse_obj_as(type_=DeforumSdPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -1849,7 +4080,7 @@ def post_v3deforum_sd_async(
def post_v3email_face_inpainting_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> EmailFaceInpaintingPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -1858,7 +4089,7 @@ def post_v3email_face_inpainting_async(
Returns
-------
- EmailFaceInpaintingPageResponse
+ typing.Any
Successful Response
Examples
@@ -1875,15 +4106,13 @@ def post_v3email_face_inpainting_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(EmailFaceInpaintingPageResponse, parse_obj_as(type_=EmailFaceInpaintingPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3face_inpainting_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> FaceInpaintingPageResponse:
+ def post_v3face_inpainting_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -1892,7 +4121,7 @@ def post_v3face_inpainting_async(
Returns
-------
- FaceInpaintingPageResponse
+ typing.Any
Successful Response
Examples
@@ -1909,15 +4138,13 @@ def post_v3face_inpainting_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(FaceInpaintingPageResponse, parse_obj_as(type_=FaceInpaintingPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3google_image_gen_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> GoogleImageGenPageResponse:
+ def post_v3google_image_gen_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -1926,7 +4153,7 @@ def post_v3google_image_gen_async(
Returns
-------
- GoogleImageGenPageResponse
+ typing.Any
Successful Response
Examples
@@ -1943,15 +4170,13 @@ def post_v3google_image_gen_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(GoogleImageGenPageResponse, parse_obj_as(type_=GoogleImageGenPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3image_segmentation_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> ImageSegmentationPageResponse:
+ def post_v3image_segmentation_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -1960,7 +4185,7 @@ def post_v3image_segmentation_async(
Returns
-------
- ImageSegmentationPageResponse
+ typing.Any
Successful Response
Examples
@@ -1977,13 +4202,13 @@ def post_v3image_segmentation_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(ImageSegmentationPageResponse, parse_obj_as(type_=ImageSegmentationPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3img2img_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> Img2ImgPageResponse:
+ def post_v3img2img_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -1992,7 +4217,7 @@ def post_v3img2img_async(self, *, request_options: typing.Optional[RequestOption
Returns
-------
- Img2ImgPageResponse
+ typing.Any
Successful Response
Examples
@@ -2009,15 +4234,13 @@ def post_v3img2img_async(self, *, request_options: typing.Optional[RequestOption
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(Img2ImgPageResponse, parse_obj_as(type_=Img2ImgPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3letter_writer_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> LetterWriterPageResponse:
+ def post_v3letter_writer_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2026,7 +4249,7 @@ def post_v3letter_writer_async(
Returns
-------
- LetterWriterPageResponse
+ typing.Any
Successful Response
Examples
@@ -2043,13 +4266,13 @@ def post_v3letter_writer_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(LetterWriterPageResponse, parse_obj_as(type_=LetterWriterPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3lipsync_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> LipsyncPageResponse:
+ def post_v3lipsync_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2058,7 +4281,7 @@ def post_v3lipsync_async(self, *, request_options: typing.Optional[RequestOption
Returns
-------
- LipsyncPageResponse
+ typing.Any
Successful Response
Examples
@@ -2075,15 +4298,13 @@ def post_v3lipsync_async(self, *, request_options: typing.Optional[RequestOption
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(LipsyncPageResponse, parse_obj_as(type_=LipsyncPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3lipsync_tts_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> LipsyncTtsPageResponse:
+ def post_v3lipsync_tts_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2092,7 +4313,7 @@ def post_v3lipsync_tts_async(
Returns
-------
- LipsyncTtsPageResponse
+ typing.Any
Successful Response
Examples
@@ -2109,15 +4330,13 @@ def post_v3lipsync_tts_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(LipsyncTtsPageResponse, parse_obj_as(type_=LipsyncTtsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3object_inpainting_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> ObjectInpaintingPageResponse:
+ def post_v3object_inpainting_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2126,7 +4345,7 @@ def post_v3object_inpainting_async(
Returns
-------
- ObjectInpaintingPageResponse
+ typing.Any
Successful Response
Examples
@@ -2143,15 +4362,13 @@ def post_v3object_inpainting_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(ObjectInpaintingPageResponse, parse_obj_as(type_=ObjectInpaintingPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3seo_summary_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> SeoSummaryPageResponse:
+ def post_v3seo_summary_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2160,7 +4377,7 @@ def post_v3seo_summary_async(
Returns
-------
- SeoSummaryPageResponse
+ typing.Any
Successful Response
Examples
@@ -2177,15 +4394,13 @@ def post_v3seo_summary_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SeoSummaryPageResponse, parse_obj_as(type_=SeoSummaryPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3smart_gpt_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> SmartGptPageResponse:
+ def post_v3smart_gpt_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2194,7 +4409,7 @@ def post_v3smart_gpt_async(
Returns
-------
- SmartGptPageResponse
+ typing.Any
Successful Response
Examples
@@ -2211,7 +4426,7 @@ def post_v3smart_gpt_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SmartGptPageResponse, parse_obj_as(type_=SmartGptPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -2219,7 +4434,7 @@ def post_v3smart_gpt_async(
def post_v3social_lookup_email_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> SocialLookupEmailPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -2228,7 +4443,7 @@ def post_v3social_lookup_email_async(
Returns
-------
- SocialLookupEmailPageResponse
+ typing.Any
Successful Response
Examples
@@ -2245,15 +4460,13 @@ def post_v3social_lookup_email_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SocialLookupEmailPageResponse, parse_obj_as(type_=SocialLookupEmailPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3text_to_speech_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> TextToSpeechPageResponse:
+ def post_v3text_to_speech_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2262,7 +4475,7 @@ def post_v3text_to_speech_async(
Returns
-------
- TextToSpeechPageResponse
+ typing.Any
Successful Response
Examples
@@ -2279,15 +4492,13 @@ def post_v3text_to_speech_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(TextToSpeechPageResponse, parse_obj_as(type_=TextToSpeechPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3art_qr_code_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> QrCodeGeneratorPageResponse:
+ def post_v3art_qr_code_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2296,7 +4507,7 @@ def post_v3art_qr_code_async(
Returns
-------
- QrCodeGeneratorPageResponse
+ typing.Any
Successful Response
Examples
@@ -2313,13 +4524,13 @@ def post_v3art_qr_code_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(QrCodeGeneratorPageResponse, parse_obj_as(type_=QrCodeGeneratorPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> AsrPageResponse:
+ def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2328,7 +4539,7 @@ def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] =
Returns
-------
- AsrPageResponse
+ typing.Any
Successful Response
Examples
@@ -2345,15 +4556,13 @@ def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] =
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(AsrPageResponse, parse_obj_as(type_=AsrPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3bulk_eval_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> BulkEvalPageResponse:
+ def post_v3bulk_eval_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2362,7 +4571,7 @@ def post_v3bulk_eval_async(
Returns
-------
- BulkEvalPageResponse
+ typing.Any
Successful Response
Examples
@@ -2379,15 +4588,13 @@ def post_v3bulk_eval_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BulkEvalPageResponse, parse_obj_as(type_=BulkEvalPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3bulk_runner_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> BulkRunnerPageResponse:
+ def post_v3bulk_runner_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2396,7 +4603,7 @@ def post_v3bulk_runner_async(
Returns
-------
- BulkRunnerPageResponse
+ typing.Any
Successful Response
Examples
@@ -2413,7 +4620,7 @@ def post_v3bulk_runner_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BulkRunnerPageResponse, parse_obj_as(type_=BulkRunnerPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -2421,7 +4628,7 @@ def post_v3bulk_runner_async(
def post_v3compare_ai_upscalers_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> CompareUpscalerPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -2430,7 +4637,7 @@ def post_v3compare_ai_upscalers_async(
Returns
-------
- CompareUpscalerPageResponse
+ typing.Any
Successful Response
Examples
@@ -2447,15 +4654,13 @@ def post_v3compare_ai_upscalers_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareUpscalerPageResponse, parse_obj_as(type_=CompareUpscalerPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3doc_extract_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> DocExtractPageResponse:
+ def post_v3doc_extract_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2464,7 +4669,7 @@ def post_v3doc_extract_async(
Returns
-------
- DocExtractPageResponse
+ typing.Any
Successful Response
Examples
@@ -2481,15 +4686,13 @@ def post_v3doc_extract_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocExtractPageResponse, parse_obj_as(type_=DocExtractPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3doc_search_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> DocSearchPageResponse:
+ def post_v3doc_search_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2498,7 +4701,7 @@ def post_v3doc_search_async(
Returns
-------
- DocSearchPageResponse
+ typing.Any
Successful Response
Examples
@@ -2515,15 +4718,13 @@ def post_v3doc_search_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocSearchPageResponse, parse_obj_as(type_=DocSearchPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3doc_summary_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> DocSummaryPageResponse:
+ def post_v3doc_summary_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2532,7 +4733,7 @@ def post_v3doc_summary_async(
Returns
-------
- DocSummaryPageResponse
+ typing.Any
Successful Response
Examples
@@ -2549,15 +4750,13 @@ def post_v3doc_summary_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocSummaryPageResponse, parse_obj_as(type_=DocSummaryPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3embeddings_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> EmbeddingsPageResponse:
+ def post_v3embeddings_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2566,7 +4765,7 @@ def post_v3embeddings_async(
Returns
-------
- EmbeddingsPageResponse
+ typing.Any
Successful Response
Examples
@@ -2583,15 +4782,13 @@ def post_v3embeddings_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(EmbeddingsPageResponse, parse_obj_as(type_=EmbeddingsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3functions_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> FunctionsPageResponse:
+ def post_v3functions_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2600,7 +4797,7 @@ def post_v3functions_async(
Returns
-------
- FunctionsPageResponse
+ typing.Any
Successful Response
Examples
@@ -2617,15 +4814,13 @@ def post_v3functions_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(FunctionsPageResponse, parse_obj_as(type_=FunctionsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3google_gpt_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> GoogleGptPageResponse:
+ def post_v3google_gpt_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2634,7 +4829,7 @@ def post_v3google_gpt_async(
Returns
-------
- GoogleGptPageResponse
+ typing.Any
Successful Response
Examples
@@ -2651,7 +4846,7 @@ def post_v3google_gpt_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -2659,7 +4854,7 @@ def post_v3google_gpt_async(
def post_v3related_qna_maker_doc_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> RelatedQnADocPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -2668,7 +4863,7 @@ def post_v3related_qna_maker_doc_async(
Returns
-------
- RelatedQnADocPageResponse
+ typing.Any
Successful Response
Examples
@@ -2685,15 +4880,13 @@ def post_v3related_qna_maker_doc_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(RelatedQnADocPageResponse, parse_obj_as(type_=RelatedQnADocPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3related_qna_maker_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> RelatedQnAPageResponse:
+ def post_v3related_qna_maker_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2702,7 +4895,7 @@ def post_v3related_qna_maker_async(
Returns
-------
- RelatedQnAPageResponse
+ typing.Any
Successful Response
Examples
@@ -2719,15 +4912,13 @@ def post_v3related_qna_maker_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(RelatedQnAPageResponse, parse_obj_as(type_=RelatedQnAPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3text2audio_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> Text2AudioPageResponse:
+ def post_v3text2audio_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2736,7 +4927,7 @@ def post_v3text2audio_async(
Returns
-------
- Text2AudioPageResponse
+ typing.Any
Successful Response
Examples
@@ -2753,15 +4944,13 @@ def post_v3text2audio_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(Text2AudioPageResponse, parse_obj_as(type_=Text2AudioPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3translate_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> TranslationPageResponse:
+ def post_v3translate_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2770,7 +4959,7 @@ def post_v3translate_async(
Returns
-------
- TranslationPageResponse
+ typing.Any
Successful Response
Examples
@@ -2787,15 +4976,13 @@ def post_v3translate_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(TranslationPageResponse, parse_obj_as(type_=TranslationPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post_v3video_bots_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> VideoBotsPageResponse:
+ def post_v3video_bots_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -2804,7 +4991,7 @@ def post_v3video_bots_async(
Returns
-------
- VideoBotsPageResponse
+ typing.Any
Successful Response
Examples
@@ -2821,7 +5008,7 @@ def post_v3video_bots_async(
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(VideoBotsPageResponse, parse_obj_as(type_=VideoBotsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -2890,35 +5077,128 @@ def __init__(
)
self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper)
self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper)
+ self.ai_animation_generator = AsyncAiAnimationGeneratorClient(client_wrapper=self._client_wrapper)
+ self.ai_art_qr_code = AsyncAiArtQrCodeClient(client_wrapper=self._client_wrapper)
+ self.generate_people_also_ask_seo_content = AsyncGeneratePeopleAlsoAskSeoContentClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.create_a_perfect_seo_optimized_title_paragraph = AsyncCreateAPerfectSeoOptimizedTitleParagraphClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.web_search_gpt3 = AsyncWebSearchGpt3Client(client_wrapper=self._client_wrapper)
+ self.profile_lookup_gpt3for_ai_personalized_emails = AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.bulk_runner = AsyncBulkRunnerClient(client_wrapper=self._client_wrapper)
self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper)
+ self.synthetic_data_maker_for_videos_pd_fs = AsyncSyntheticDataMakerForVideosPdFsClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.large_language_models_gpt3 = AsyncLargeLanguageModelsGpt3Client(client_wrapper=self._client_wrapper)
+ self.search_your_docs_with_gpt = AsyncSearchYourDocsWithGptClient(client_wrapper=self._client_wrapper)
self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper)
+ self.summarize_your_docs_with_gpt = AsyncSummarizeYourDocsWithGptClient(client_wrapper=self._client_wrapper)
self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper)
self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper)
- self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper)
- self.bulk_runner = AsyncBulkRunnerClient(client_wrapper=self._client_wrapper)
+ self.lipsync_video_with_any_text = AsyncLipsyncVideoWithAnyTextClient(client_wrapper=self._client_wrapper)
+ self.compare_ai_voice_generators = AsyncCompareAiVoiceGeneratorsClient(client_wrapper=self._client_wrapper)
+ self.speech_recognition_translation = AsyncSpeechRecognitionTranslationClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.text_guided_audio_generator = AsyncTextGuidedAudioGeneratorClient(client_wrapper=self._client_wrapper)
+ self.compare_ai_translations = AsyncCompareAiTranslationsClient(client_wrapper=self._client_wrapper)
+ self.edit_an_image_with_ai_prompt = AsyncEditAnImageWithAiPromptClient(client_wrapper=self._client_wrapper)
+ self.compare_ai_image_generators = AsyncCompareAiImageGeneratorsClient(client_wrapper=self._client_wrapper)
+ self.generate_product_photo_backgrounds = AsyncGenerateProductPhotoBackgroundsClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.ai_image_with_a_face = AsyncAiImageWithAFaceClient(client_wrapper=self._client_wrapper)
+ self.ai_generated_photo_from_email_profile_lookup = AsyncAiGeneratedPhotoFromEmailProfileLookupClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.render_image_search_results_with_ai = AsyncRenderImageSearchResultsWithAiClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.ai_background_changer = AsyncAiBackgroundChangerClient(client_wrapper=self._client_wrapper)
+ self.compare_ai_image_upscalers = AsyncCompareAiImageUpscalersClient(client_wrapper=self._client_wrapper)
+ self.chyron_plant_bot = AsyncChyronPlantBotClient(client_wrapper=self._client_wrapper)
+ self.letter_writer = AsyncLetterWriterClient(client_wrapper=self._client_wrapper)
self.embeddings = AsyncEmbeddingsClient(client_wrapper=self._client_wrapper)
+ self.people_also_ask_answers_from_a_doc = AsyncPeopleAlsoAskAnswersFromADocClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper)
async def animate(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormDeforumSd:
+ self,
+ *,
+ animation_prompts: typing.List[AnimationPrompt],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ max_frames: typing.Optional[int] = None,
+ selected_model: typing.Optional[AnimateRequestSelectedModel] = None,
+ animation_mode: typing.Optional[str] = None,
+ zoom: typing.Optional[str] = None,
+ translation_x: typing.Optional[str] = None,
+ translation_y: typing.Optional[str] = None,
+ rotation3d_x: typing.Optional[str] = None,
+ rotation3d_y: typing.Optional[str] = None,
+ rotation3d_z: typing.Optional[str] = None,
+ fps: typing.Optional[int] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> DeforumSdPageStatusResponse:
"""
Parameters
----------
+ animation_prompts : typing.List[AnimationPrompt]
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ max_frames : typing.Optional[int]
+
+ selected_model : typing.Optional[AnimateRequestSelectedModel]
+
+ animation_mode : typing.Optional[str]
+
+ zoom : typing.Optional[str]
+
+ translation_x : typing.Optional[str]
+
+ translation_y : typing.Optional[str]
+
+ rotation3d_x : typing.Optional[str]
+
+ rotation3d_y : typing.Optional[str]
+
+ rotation3d_z : typing.Optional[str]
+
+ fps : typing.Optional[int]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormDeforumSd
+ DeforumSdPageStatusResponse
Successful Response
Examples
--------
import asyncio
- from gooey import AsyncGooey
+ from gooey import AnimationPrompt, AsyncGooey
client = AsyncGooey(
api_key="YOUR_API_KEY",
@@ -2926,17 +5206,46 @@ async def animate(
async def main() -> None:
- await client.animate()
+ await client.animate(
+ animation_prompts=[
+ AnimationPrompt(
+ frame="frame",
+ prompt="prompt",
+ )
+ ],
+ )
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/DeforumSD/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/DeforumSD/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "animation_prompts": animation_prompts,
+ "max_frames": max_frames,
+ "selected_model": selected_model,
+ "animation_mode": animation_mode,
+ "zoom": zoom,
+ "translation_x": translation_x,
+ "translation_y": translation_y,
+ "rotation_3d_x": rotation3d_x,
+ "rotation_3d_y": rotation3d_y,
+ "rotation_3d_z": rotation3d_z,
+ "fps": fps,
+ "seed": seed,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormDeforumSd, parse_obj_as(type_=BodyAsyncFormDeforumSd, object_=_response.json())) # type: ignore
+ return typing.cast(DeforumSdPageStatusResponse, parse_obj_as(type_=DeforumSdPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -2963,19 +5272,112 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def qr_code(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormArtQrCode:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ qr_code_data: typing.Optional[str] = None,
+ qr_code_input_image: typing.Optional[str] = None,
+ qr_code_vcard: typing.Optional[Vcard] = None,
+ qr_code_file: typing.Optional[str] = None,
+ use_url_shortener: typing.Optional[bool] = None,
+ negative_prompt: typing.Optional[str] = None,
+ image_prompt: typing.Optional[str] = None,
+ image_prompt_controlnet_models: typing.Optional[
+ typing.List[QrCodeRequestImagePromptControlnetModelsItem]
+ ] = None,
+ image_prompt_strength: typing.Optional[float] = None,
+ image_prompt_scale: typing.Optional[float] = None,
+ image_prompt_pos_x: typing.Optional[float] = None,
+ image_prompt_pos_y: typing.Optional[float] = None,
+ selected_model: typing.Optional[QrCodeRequestSelectedModel] = None,
+ selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ scheduler: typing.Optional[QrCodeRequestScheduler] = None,
+ seed: typing.Optional[int] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> QrCodeGeneratorPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ qr_code_data : typing.Optional[str]
+
+ qr_code_input_image : typing.Optional[str]
+
+ qr_code_vcard : typing.Optional[Vcard]
+
+ qr_code_file : typing.Optional[str]
+
+ use_url_shortener : typing.Optional[bool]
+
+ negative_prompt : typing.Optional[str]
+
+ image_prompt : typing.Optional[str]
+
+ image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]
+
+ image_prompt_strength : typing.Optional[float]
+
+ image_prompt_scale : typing.Optional[float]
+
+ image_prompt_pos_x : typing.Optional[float]
+
+ image_prompt_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[QrCodeRequestSelectedModel]
+
+ selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ controlnet_conditioning_scale : typing.Optional[typing.List[float]]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ scheduler : typing.Optional[QrCodeRequestScheduler]
+
+ seed : typing.Optional[int]
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormArtQrCode
+ QrCodeGeneratorPageStatusResponse
Successful Response
Examples
@@ -2990,7 +5392,9 @@ async def qr_code(
async def main() -> None:
- await client.qr_code()
+ await client.qr_code(
+ text_prompt="text_prompt",
+ )
asyncio.run(main())
@@ -2999,11 +5403,44 @@ async def main() -> None:
"v3/art-qr-code/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "qr_code_data": qr_code_data,
+ "qr_code_input_image": qr_code_input_image,
+ "qr_code_vcard": qr_code_vcard,
+ "qr_code_file": qr_code_file,
+ "use_url_shortener": use_url_shortener,
+ "text_prompt": text_prompt,
+ "negative_prompt": negative_prompt,
+ "image_prompt": image_prompt,
+ "image_prompt_controlnet_models": image_prompt_controlnet_models,
+ "image_prompt_strength": image_prompt_strength,
+ "image_prompt_scale": image_prompt_scale,
+ "image_prompt_pos_x": image_prompt_pos_x,
+ "image_prompt_pos_y": image_prompt_pos_y,
+ "selected_model": selected_model,
+ "selected_controlnet_model": selected_controlnet_model,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "controlnet_conditioning_scale": controlnet_conditioning_scale,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "scheduler": scheduler,
+ "seed": seed,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormArtQrCode, parse_obj_as(type_=BodyAsyncFormArtQrCode, object_=_response.json())) # type: ignore
+ return typing.cast(QrCodeGeneratorPageStatusResponse, parse_obj_as(type_=QrCodeGeneratorPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3030,19 +5467,101 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def seo_people_also_ask(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormRelatedQnaMaker:
+ self,
+ *,
+ search_query: str,
+ site_filter: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[SeoPeopleAlsoAskRequestSelectedModel] = None,
+ max_search_urls: typing.Optional[int] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ embedding_model: typing.Optional[SeoPeopleAlsoAskRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[SeoPeopleAlsoAskRequestResponseFormatType] = None,
+ serp_search_location: typing.Optional[SerpSearchLocation] = None,
+ scaleserp_locations: typing.Optional[typing.List[str]] = None,
+ serp_search_type: typing.Optional[SerpSearchType] = None,
+ scaleserp_search_field: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnAPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
+ site_filter : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[SeoPeopleAlsoAskRequestSelectedModel]
+
+ max_search_urls : typing.Optional[int]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ embedding_model : typing.Optional[SeoPeopleAlsoAskRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[SeoPeopleAlsoAskRequestResponseFormatType]
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.List[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ serp_search_type : typing.Optional[SerpSearchType]
+
+ scaleserp_search_field : typing.Optional[str]
+ DEPRECATED: use `serp_search_type` instead
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormRelatedQnaMaker
+ RelatedQnAPageStatusResponse
Successful Response
Examples
@@ -3057,7 +5576,10 @@ async def seo_people_also_ask(
async def main() -> None:
- await client.seo_people_also_ask()
+ await client.seo_people_also_ask(
+ search_query="search_query",
+ site_filter="site_filter",
+ )
asyncio.run(main())
@@ -3066,11 +5588,39 @@ async def main() -> None:
"v3/related-qna-maker/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "search_query": search_query,
+ "site_filter": site_filter,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "selected_model": selected_model,
+ "max_search_urls": max_search_urls,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "serp_search_type": serp_search_type,
+ "scaleserp_search_field": scaleserp_search_field,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormRelatedQnaMaker, parse_obj_as(type_=BodyAsyncFormRelatedQnaMaker, object_=_response.json())) # type: ignore
+ return typing.cast(RelatedQnAPageStatusResponse, parse_obj_as(type_=RelatedQnAPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3097,19 +5647,87 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def seo_content(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormSeoSummary:
+ self,
+ *,
+ search_query: str,
+ keywords: str,
+ title: str,
+ company_url: str,
+ example_id: typing.Optional[str] = None,
+ task_instructions: typing.Optional[str] = None,
+ enable_html: typing.Optional[bool] = None,
+ selected_model: typing.Optional[SeoContentRequestSelectedModel] = None,
+ max_search_urls: typing.Optional[int] = None,
+ enable_crosslinks: typing.Optional[bool] = None,
+ seed: typing.Optional[int] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[SeoContentRequestResponseFormatType] = None,
+ serp_search_location: typing.Optional[SerpSearchLocation] = None,
+ scaleserp_locations: typing.Optional[typing.List[str]] = None,
+ serp_search_type: typing.Optional[SerpSearchType] = None,
+ scaleserp_search_field: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> SeoSummaryPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
+ keywords : str
+
+ title : str
+
+ company_url : str
+
example_id : typing.Optional[str]
+ task_instructions : typing.Optional[str]
+
+ enable_html : typing.Optional[bool]
+
+ selected_model : typing.Optional[SeoContentRequestSelectedModel]
+
+ max_search_urls : typing.Optional[int]
+
+ enable_crosslinks : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[SeoContentRequestResponseFormatType]
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.List[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ serp_search_type : typing.Optional[SerpSearchType]
+
+ scaleserp_search_field : typing.Optional[str]
+ DEPRECATED: use `serp_search_type` instead
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormSeoSummary
+ SeoSummaryPageStatusResponse
Successful Response
Examples
@@ -3124,7 +5742,12 @@ async def seo_content(
async def main() -> None:
- await client.seo_content()
+ await client.seo_content(
+ search_query="search_query",
+ keywords="keywords",
+ title="title",
+ company_url="company_url",
+ )
asyncio.run(main())
@@ -3133,11 +5756,36 @@ async def main() -> None:
"v3/SEOSummary/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "search_query": search_query,
+ "keywords": keywords,
+ "title": title,
+ "company_url": company_url,
+ "task_instructions": task_instructions,
+ "enable_html": enable_html,
+ "selected_model": selected_model,
+ "max_search_urls": max_search_urls,
+ "enable_crosslinks": enable_crosslinks,
+ "seed": seed,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "serp_search_type": serp_search_type,
+ "scaleserp_search_field": scaleserp_search_field,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormSeoSummary, parse_obj_as(type_=BodyAsyncFormSeoSummary, object_=_response.json())) # type: ignore
+ return typing.cast(SeoSummaryPageStatusResponse, parse_obj_as(type_=SeoSummaryPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3164,19 +5812,101 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def web_search_llm(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormGoogleGpt:
+ self,
+ *,
+ search_query: str,
+ site_filter: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[WebSearchLlmRequestSelectedModel] = None,
+ max_search_urls: typing.Optional[int] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ embedding_model: typing.Optional[WebSearchLlmRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[WebSearchLlmRequestResponseFormatType] = None,
+ serp_search_location: typing.Optional[SerpSearchLocation] = None,
+ scaleserp_locations: typing.Optional[typing.List[str]] = None,
+ serp_search_type: typing.Optional[SerpSearchType] = None,
+ scaleserp_search_field: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleGptPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
+ site_filter : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[WebSearchLlmRequestSelectedModel]
+
+ max_search_urls : typing.Optional[int]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ embedding_model : typing.Optional[WebSearchLlmRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[WebSearchLlmRequestResponseFormatType]
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.List[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ serp_search_type : typing.Optional[SerpSearchType]
+
+ scaleserp_search_field : typing.Optional[str]
+ DEPRECATED: use `serp_search_type` instead
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormGoogleGpt
+ GoogleGptPageStatusResponse
Successful Response
Examples
@@ -3191,7 +5921,10 @@ async def web_search_llm(
async def main() -> None:
- await client.web_search_llm()
+ await client.web_search_llm(
+ search_query="search_query",
+ site_filter="site_filter",
+ )
asyncio.run(main())
@@ -3200,11 +5933,39 @@ async def main() -> None:
"v3/google-gpt/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "search_query": search_query,
+ "site_filter": site_filter,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "selected_model": selected_model,
+ "max_search_urls": max_search_urls,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "serp_search_type": serp_search_type,
+ "scaleserp_search_field": scaleserp_search_field,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormGoogleGpt, parse_obj_as(type_=BodyAsyncFormGoogleGpt, object_=_response.json())) # type: ignore
+ return typing.cast(GoogleGptPageStatusResponse, parse_obj_as(type_=GoogleGptPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3231,19 +5992,59 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def personalize_email(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormSocialLookupEmail:
+ self,
+ *,
+ email_address: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_prompt: typing.Optional[str] = None,
+ selected_model: typing.Optional[PersonalizeEmailRequestSelectedModel] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[PersonalizeEmailRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> SocialLookupEmailPageStatusResponse:
"""
Parameters
----------
+ email_address : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ selected_model : typing.Optional[PersonalizeEmailRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[PersonalizeEmailRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormSocialLookupEmail
+ SocialLookupEmailPageStatusResponse
Successful Response
Examples
@@ -3258,7 +6059,9 @@ async def personalize_email(
async def main() -> None:
- await client.personalize_email()
+ await client.personalize_email(
+ email_address="email_address",
+ )
asyncio.run(main())
@@ -3267,11 +6070,27 @@ async def main() -> None:
"v3/SocialLookupEmail/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "email_address": email_address,
+ "input_prompt": input_prompt,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormSocialLookupEmail, parse_obj_as(type_=BodyAsyncFormSocialLookupEmail, object_=_response.json())) # type: ignore
+ return typing.cast(SocialLookupEmailPageStatusResponse, parse_obj_as(type_=SocialLookupEmailPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3298,19 +6117,65 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def bulk_run(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormBulkRunner:
+ self,
+ *,
+ documents: typing.List[str],
+ run_urls: typing.List[str],
+ input_columns: typing.Dict[str, str],
+ output_columns: typing.Dict[str, str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ eval_urls: typing.Optional[typing.List[str]] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkRunnerPageStatusResponse:
"""
Parameters
----------
+ documents : typing.List[str]
+
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions or, for Art QR Code, pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+
+ run_urls : typing.List[str]
+
+ Provide one or more Gooey.AI workflow runs.
+ You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+
+
+ input_columns : typing.Dict[str, str]
+
+ For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+
+
+ output_columns : typing.Dict[str, str]
+
+ For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ eval_urls : typing.Optional[typing.List[str]]
+
+ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormBulkRunner
+ BulkRunnerPageStatusResponse
Successful Response
Examples
@@ -3325,7 +6190,12 @@ async def bulk_run(
async def main() -> None:
- await client.bulk_run()
+ await client.bulk_run(
+ documents=["documents"],
+ run_urls=["run_urls"],
+ input_columns={"key": "value"},
+ output_columns={"key": "value"},
+ )
asyncio.run(main())
@@ -3334,11 +6204,23 @@ async def main() -> None:
"v3/bulk-runner/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "run_urls": run_urls,
+ "input_columns": input_columns,
+ "output_columns": output_columns,
+ "eval_urls": eval_urls,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormBulkRunner, parse_obj_as(type_=BodyAsyncFormBulkRunner, object_=_response.json())) # type: ignore
+ return typing.cast(BulkRunnerPageStatusResponse, parse_obj_as(type_=BulkRunnerPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3365,19 +6247,73 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def synthesize_data(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormDocExtract:
+ self,
+ *,
+ documents: typing.List[str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ sheet_url: typing.Optional[str] = None,
+ selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None,
+ google_translate_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[str] = None,
+ task_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[SynthesizeDataRequestSelectedModel] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> DocExtractPageStatusResponse:
"""
Parameters
----------
+ documents : typing.List[str]
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ sheet_url : typing.Optional[str]
+
+ selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel]
+
+ google_translate_target : typing.Optional[str]
+
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+ task_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[SynthesizeDataRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormDocExtract
+ DocExtractPageStatusResponse
Successful Response
Examples
@@ -3392,7 +6328,9 @@ async def synthesize_data(
async def main() -> None:
- await client.synthesize_data()
+ await client.synthesize_data(
+ documents=["documents"],
+ )
asyncio.run(main())
@@ -3401,11 +6339,31 @@ async def main() -> None:
"v3/doc-extract/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "sheet_url": sheet_url,
+ "selected_asr_model": selected_asr_model,
+ "google_translate_target": google_translate_target,
+ "glossary_document": glossary_document,
+ "task_instructions": task_instructions,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormDocExtract, parse_obj_as(type_=BodyAsyncFormDocExtract, object_=_response.json())) # type: ignore
+ return typing.cast(DocExtractPageStatusResponse, parse_obj_as(type_=DocExtractPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3432,19 +6390,56 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def llm(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormCompareLlm:
+ self,
+ *,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_prompt: typing.Optional[str] = None,
+ selected_models: typing.Optional[typing.List[LlmRequestSelectedModelsItem]] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[LlmRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareLlmPageStatusResponse:
"""
Parameters
----------
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ selected_models : typing.Optional[typing.List[LlmRequestSelectedModelsItem]]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[LlmRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormCompareLlm
+ CompareLlmPageStatusResponse
Successful Response
Examples
@@ -3468,11 +6463,26 @@ async def main() -> None:
"v3/CompareLLM/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "selected_models": selected_models,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormCompareLlm, parse_obj_as(type_=BodyAsyncFormCompareLlm, object_=_response.json())) # type: ignore
+ return typing.cast(CompareLlmPageStatusResponse, parse_obj_as(type_=CompareLlmPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3499,19 +6509,93 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def rag(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormDocSearch:
+ self,
+ *,
+ search_query: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ keyword_query: typing.Optional[RagRequestKeywordQuery] = None,
+ documents: typing.Optional[typing.List[str]] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ doc_extract_url: typing.Optional[str] = None,
+ embedding_model: typing.Optional[RagRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[RagRequestSelectedModel] = None,
+ citation_style: typing.Optional[RagRequestCitationStyle] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[RagRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSearchPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ keyword_query : typing.Optional[RagRequestKeywordQuery]
+
+ documents : typing.Optional[typing.List[str]]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ doc_extract_url : typing.Optional[str]
+
+ embedding_model : typing.Optional[RagRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[RagRequestSelectedModel]
+
+ citation_style : typing.Optional[RagRequestCitationStyle]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[RagRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormDocSearch
+ DocSearchPageStatusResponse
Successful Response
Examples
@@ -3526,7 +6610,9 @@ async def rag(
async def main() -> None:
- await client.rag()
+ await client.rag(
+ search_query="search_query",
+ )
asyncio.run(main())
@@ -3535,11 +6621,37 @@ async def main() -> None:
"v3/doc-search/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "search_query": search_query,
+ "keyword_query": keyword_query,
+ "documents": documents,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "doc_extract_url": doc_extract_url,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "selected_model": selected_model,
+ "citation_style": citation_style,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormDocSearch, parse_obj_as(type_=BodyAsyncFormDocSearch, object_=_response.json())) # type: ignore
+ return typing.cast(DocSearchPageStatusResponse, parse_obj_as(type_=DocSearchPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3566,19 +6678,71 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def doc_summary(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormDocSummary:
+ self,
+ *,
+ documents: typing.List[str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ task_instructions: typing.Optional[str] = None,
+ merge_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[DocSummaryRequestSelectedModel] = None,
+ chain_type: typing.Optional[typing.Literal["map_reduce"]] = None,
+ selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None,
+ google_translate_target: typing.Optional[str] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSummaryPageStatusResponse:
"""
Parameters
----------
+ documents : typing.List[str]
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ task_instructions : typing.Optional[str]
+
+ merge_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[DocSummaryRequestSelectedModel]
+
+ chain_type : typing.Optional[typing.Literal["map_reduce"]]
+
+ selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel]
+
+ google_translate_target : typing.Optional[str]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[DocSummaryRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormDocSummary
+ DocSummaryPageStatusResponse
Successful Response
Examples
@@ -3593,7 +6757,9 @@ async def doc_summary(
async def main() -> None:
- await client.doc_summary()
+ await client.doc_summary(
+ documents=["documents"],
+ )
asyncio.run(main())
@@ -3602,11 +6768,31 @@ async def main() -> None:
"v3/doc-summary/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "task_instructions": task_instructions,
+ "merge_instructions": merge_instructions,
+ "selected_model": selected_model,
+ "chain_type": chain_type,
+ "selected_asr_model": selected_asr_model,
+ "google_translate_target": google_translate_target,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormDocSummary, parse_obj_as(type_=BodyAsyncFormDocSummary, object_=_response.json())) # type: ignore
+ return typing.cast(DocSummaryPageStatusResponse, parse_obj_as(type_=DocSummaryPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3633,19 +6819,111 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def lipsync_tts(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormLipsyncTts:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None,
+ input_face: typing.Optional[str] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> LipsyncTtsPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider]
+
+ uberduck_voice_name : typing.Optional[str]
+
+ uberduck_speaking_rate : typing.Optional[float]
+
+ google_voice_name : typing.Optional[str]
+
+ google_speaking_rate : typing.Optional[float]
+
+ google_pitch : typing.Optional[float]
+
+ bark_history_prompt : typing.Optional[str]
+
+ elevenlabs_voice_name : typing.Optional[str]
+ Use `elevenlabs_voice_id` instead
+
+ elevenlabs_api_key : typing.Optional[str]
+
+ elevenlabs_voice_id : typing.Optional[str]
+
+ elevenlabs_model : typing.Optional[str]
+
+ elevenlabs_stability : typing.Optional[float]
+
+ elevenlabs_similarity_boost : typing.Optional[float]
+
+ elevenlabs_style : typing.Optional[float]
+
+ elevenlabs_speaker_boost : typing.Optional[bool]
+
+ azure_voice_name : typing.Optional[str]
+
+ openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName]
+
+ openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel]
+
+ input_face : typing.Optional[str]
+
+ face_padding_top : typing.Optional[int]
+
+ face_padding_bottom : typing.Optional[int]
+
+ face_padding_left : typing.Optional[int]
+
+ face_padding_right : typing.Optional[int]
+
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+ selected_model : typing.Optional[LipsyncTtsRequestSelectedModel]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormLipsyncTts
+ LipsyncTtsPageStatusResponse
Successful Response
Examples
@@ -3660,7 +6938,9 @@ async def lipsync_tts(
async def main() -> None:
- await client.lipsync_tts()
+ await client.lipsync_tts(
+ text_prompt="text_prompt",
+ )
asyncio.run(main())
@@ -3669,11 +6949,44 @@ async def main() -> None:
"v3/LipsyncTTS/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "text_prompt": text_prompt,
+ "tts_provider": tts_provider,
+ "uberduck_voice_name": uberduck_voice_name,
+ "uberduck_speaking_rate": uberduck_speaking_rate,
+ "google_voice_name": google_voice_name,
+ "google_speaking_rate": google_speaking_rate,
+ "google_pitch": google_pitch,
+ "bark_history_prompt": bark_history_prompt,
+ "elevenlabs_voice_name": elevenlabs_voice_name,
+ "elevenlabs_api_key": elevenlabs_api_key,
+ "elevenlabs_voice_id": elevenlabs_voice_id,
+ "elevenlabs_model": elevenlabs_model,
+ "elevenlabs_stability": elevenlabs_stability,
+ "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+ "elevenlabs_style": elevenlabs_style,
+ "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+ "azure_voice_name": azure_voice_name,
+ "openai_voice_name": openai_voice_name,
+ "openai_tts_model": openai_tts_model,
+ "input_face": input_face,
+ "face_padding_top": face_padding_top,
+ "face_padding_bottom": face_padding_bottom,
+ "face_padding_left": face_padding_left,
+ "face_padding_right": face_padding_right,
+ "sadtalker_settings": sadtalker_settings,
+ "selected_model": selected_model,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormLipsyncTts, parse_obj_as(type_=BodyAsyncFormLipsyncTts, object_=_response.json())) # type: ignore
+ return typing.cast(LipsyncTtsPageStatusResponse, parse_obj_as(type_=LipsyncTtsPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3700,19 +7013,90 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def text_to_speech(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormTextToSpeech:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ tts_provider: typing.Optional[TextToSpeechRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[TextToSpeechRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[TextToSpeechRequestOpenaiTtsModel] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> TextToSpeechPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ tts_provider : typing.Optional[TextToSpeechRequestTtsProvider]
+
+ uberduck_voice_name : typing.Optional[str]
+
+ uberduck_speaking_rate : typing.Optional[float]
+
+ google_voice_name : typing.Optional[str]
+
+ google_speaking_rate : typing.Optional[float]
+
+ google_pitch : typing.Optional[float]
+
+ bark_history_prompt : typing.Optional[str]
+
+ elevenlabs_voice_name : typing.Optional[str]
+ Use `elevenlabs_voice_id` instead
+
+ elevenlabs_api_key : typing.Optional[str]
+
+ elevenlabs_voice_id : typing.Optional[str]
+
+ elevenlabs_model : typing.Optional[str]
+
+ elevenlabs_stability : typing.Optional[float]
+
+ elevenlabs_similarity_boost : typing.Optional[float]
+
+ elevenlabs_style : typing.Optional[float]
+
+ elevenlabs_speaker_boost : typing.Optional[bool]
+
+ azure_voice_name : typing.Optional[str]
+
+ openai_voice_name : typing.Optional[TextToSpeechRequestOpenaiVoiceName]
+
+ openai_tts_model : typing.Optional[TextToSpeechRequestOpenaiTtsModel]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormTextToSpeech
+ TextToSpeechPageStatusResponse
Successful Response
Examples
@@ -3727,7 +7111,9 @@ async def text_to_speech(
async def main() -> None:
- await client.text_to_speech()
+ await client.text_to_speech(
+ text_prompt="text_prompt",
+ )
asyncio.run(main())
@@ -3736,11 +7122,37 @@ async def main() -> None:
"v3/TextToSpeech/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "text_prompt": text_prompt,
+ "tts_provider": tts_provider,
+ "uberduck_voice_name": uberduck_voice_name,
+ "uberduck_speaking_rate": uberduck_speaking_rate,
+ "google_voice_name": google_voice_name,
+ "google_speaking_rate": google_speaking_rate,
+ "google_pitch": google_pitch,
+ "bark_history_prompt": bark_history_prompt,
+ "elevenlabs_voice_name": elevenlabs_voice_name,
+ "elevenlabs_api_key": elevenlabs_api_key,
+ "elevenlabs_voice_id": elevenlabs_voice_id,
+ "elevenlabs_model": elevenlabs_model,
+ "elevenlabs_stability": elevenlabs_stability,
+ "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+ "elevenlabs_style": elevenlabs_style,
+ "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+ "azure_voice_name": azure_voice_name,
+ "openai_voice_name": openai_voice_name,
+ "openai_tts_model": openai_tts_model,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormTextToSpeech, parse_obj_as(type_=BodyAsyncFormTextToSpeech, object_=_response.json())) # type: ignore
+ return typing.cast(TextToSpeechPageStatusResponse, parse_obj_as(type_=TextToSpeechPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3767,19 +7179,62 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def speech_recognition(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormAsr:
+ self,
+ *,
+ documents: typing.List[str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None,
+ language: typing.Optional[str] = None,
+ translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None,
+ output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None,
+ google_translate_target: typing.Optional[str] = None,
+ translation_source: typing.Optional[str] = None,
+ translation_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsrPageStatusResponse:
"""
Parameters
----------
+ documents : typing.List[str]
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel]
+
+ language : typing.Optional[str]
+
+ translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel]
+
+ output_format : typing.Optional[SpeechRecognitionRequestOutputFormat]
+
+ google_translate_target : typing.Optional[str]
+ use `translation_model` & `translation_target` instead.
+
+ translation_source : typing.Optional[str]
+
+ translation_target : typing.Optional[str]
+
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormAsr
+ AsrPageStatusResponse
Successful Response
Examples
@@ -3794,17 +7249,38 @@ async def speech_recognition(
async def main() -> None:
- await client.speech_recognition()
+ await client.speech_recognition(
+ documents=["documents"],
+ )
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/asr/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/asr/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "selected_model": selected_model,
+ "language": language,
+ "translation_model": translation_model,
+ "output_format": output_format,
+ "google_translate_target": google_translate_target,
+ "translation_source": translation_source,
+ "translation_target": translation_target,
+ "glossary_document": glossary_document,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormAsr, parse_obj_as(type_=BodyAsyncFormAsr, object_=_response.json())) # type: ignore
+ return typing.cast(AsrPageStatusResponse, parse_obj_as(type_=AsrPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3831,19 +7307,59 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def text_to_music(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormText2Audio:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ negative_prompt: typing.Optional[str] = None,
+ duration_sec: typing.Optional[float] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ seed: typing.Optional[int] = None,
+ sd2upscaling: typing.Optional[bool] = None,
+ selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> Text2AudioPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ negative_prompt : typing.Optional[str]
+
+ duration_sec : typing.Optional[float]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ seed : typing.Optional[int]
+
+ sd2upscaling : typing.Optional[bool]
+
+ selected_models : typing.Optional[typing.List[typing.Literal["audio_ldm"]]]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormText2Audio
+ Text2AudioPageStatusResponse
Successful Response
Examples
@@ -3858,7 +7374,9 @@ async def text_to_music(
async def main() -> None:
- await client.text_to_music()
+ await client.text_to_music(
+ text_prompt="text_prompt",
+ )
asyncio.run(main())
@@ -3867,11 +7385,27 @@ async def main() -> None:
"v3/text2audio/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "text_prompt": text_prompt,
+ "negative_prompt": negative_prompt,
+ "duration_sec": duration_sec,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "guidance_scale": guidance_scale,
+ "seed": seed,
+ "sd_2_upscaling": sd2upscaling,
+ "selected_models": selected_models,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormText2Audio, parse_obj_as(type_=BodyAsyncFormText2Audio, object_=_response.json())) # type: ignore
+ return typing.cast(Text2AudioPageStatusResponse, parse_obj_as(type_=Text2AudioPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3898,19 +7432,49 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def translate(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormTranslate:
+ self,
+ *,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ texts: typing.Optional[typing.List[str]] = None,
+ selected_model: typing.Optional[TranslateRequestSelectedModel] = None,
+ translation_source: typing.Optional[str] = None,
+ translation_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> TranslationPageStatusResponse:
"""
Parameters
----------
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ texts : typing.Optional[typing.List[str]]
+
+ selected_model : typing.Optional[TranslateRequestSelectedModel]
+
+ translation_source : typing.Optional[str]
+
+ translation_target : typing.Optional[str]
+
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormTranslate
+ TranslationPageStatusResponse
Successful Response
Examples
@@ -3931,11 +7495,26 @@ async def main() -> None:
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/translate/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/translate/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "texts": texts,
+ "selected_model": selected_model,
+ "translation_source": translation_source,
+ "translation_target": translation_target,
+ "glossary_document": glossary_document,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormTranslate, parse_obj_as(type_=BodyAsyncFormTranslate, object_=_response.json())) # type: ignore
+ return typing.cast(TranslationPageStatusResponse, parse_obj_as(type_=TranslationPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -3962,19 +7541,74 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def remix_image(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormImg2Img:
+ self,
+ *,
+ input_image: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ text_prompt: typing.Optional[str] = None,
+ selected_model: typing.Optional[RemixImageRequestSelectedModel] = None,
+ selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ prompt_strength: typing.Optional[float] = None,
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
+ seed: typing.Optional[int] = None,
+ image_guidance_scale: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> Img2ImgPageStatusResponse:
"""
Parameters
----------
+ input_image : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ text_prompt : typing.Optional[str]
+
+ selected_model : typing.Optional[RemixImageRequestSelectedModel]
+
+ selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ prompt_strength : typing.Optional[float]
+
+ controlnet_conditioning_scale : typing.Optional[typing.List[float]]
+
+ seed : typing.Optional[int]
+
+ image_guidance_scale : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormImg2Img
+ Img2ImgPageStatusResponse
Successful Response
Examples
@@ -3989,17 +7623,43 @@ async def remix_image(
async def main() -> None:
- await client.remix_image()
+ await client.remix_image(
+ input_image="input_image",
+ )
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/Img2Img/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/Img2Img/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "selected_model": selected_model,
+ "selected_controlnet_model": selected_controlnet_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "prompt_strength": prompt_strength,
+ "controlnet_conditioning_scale": controlnet_conditioning_scale,
+ "seed": seed,
+ "image_guidance_scale": image_guidance_scale,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormImg2Img, parse_obj_as(type_=BodyAsyncFormImg2Img, object_=_response.json())) # type: ignore
+ return typing.cast(Img2ImgPageStatusResponse, parse_obj_as(type_=Img2ImgPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -4026,19 +7686,77 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def text_to_image(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormCompareText2Img:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ negative_prompt: typing.Optional[str] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ dall_e3quality: typing.Optional[str] = None,
+ dall_e3style: typing.Optional[str] = None,
+ guidance_scale: typing.Optional[float] = None,
+ seed: typing.Optional[int] = None,
+ sd2upscaling: typing.Optional[bool] = None,
+ selected_models: typing.Optional[typing.List[TextToImageRequestSelectedModelsItem]] = None,
+ scheduler: typing.Optional[TextToImageRequestScheduler] = None,
+ edit_instruction: typing.Optional[str] = None,
+ image_guidance_scale: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareText2ImgPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ negative_prompt : typing.Optional[str]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ dall_e3quality : typing.Optional[str]
+
+ dall_e3style : typing.Optional[str]
+
+ guidance_scale : typing.Optional[float]
+
+ seed : typing.Optional[int]
+
+ sd2upscaling : typing.Optional[bool]
+
+ selected_models : typing.Optional[typing.List[TextToImageRequestSelectedModelsItem]]
+
+ scheduler : typing.Optional[TextToImageRequestScheduler]
+
+ edit_instruction : typing.Optional[str]
+
+ image_guidance_scale : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormCompareText2Img
+ CompareText2ImgPageStatusResponse
Successful Response
Examples
@@ -4053,7 +7771,9 @@ async def text_to_image(
async def main() -> None:
- await client.text_to_image()
+ await client.text_to_image(
+ text_prompt="text_prompt",
+ )
asyncio.run(main())
@@ -4062,11 +7782,33 @@ async def main() -> None:
"v3/CompareText2Img/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "text_prompt": text_prompt,
+ "negative_prompt": negative_prompt,
+ "output_width": output_width,
+ "output_height": output_height,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "dall_e_3_quality": dall_e3quality,
+ "dall_e_3_style": dall_e3style,
+ "guidance_scale": guidance_scale,
+ "seed": seed,
+ "sd_2_upscaling": sd2upscaling,
+ "selected_models": selected_models,
+ "scheduler": scheduler,
+ "edit_instruction": edit_instruction,
+ "image_guidance_scale": image_guidance_scale,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormCompareText2Img, parse_obj_as(type_=BodyAsyncFormCompareText2Img, object_=_response.json())) # type: ignore
+ return typing.cast(CompareText2ImgPageStatusResponse, parse_obj_as(type_=CompareText2ImgPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -4093,19 +7835,77 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def product_image(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormObjectInpainting:
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ mask_threshold: typing.Optional[float] = None,
+ selected_model: typing.Optional[ProductImageRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ sd2upscaling: typing.Optional[bool] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> ObjectInpaintingPageStatusResponse:
"""
Parameters
----------
+ input_image : str
+
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ mask_threshold : typing.Optional[float]
+
+ selected_model : typing.Optional[ProductImageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ sd2upscaling : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormObjectInpainting
+ ObjectInpaintingPageStatusResponse
Successful Response
Examples
@@ -4120,7 +7920,10 @@ async def product_image(
async def main() -> None:
- await client.product_image()
+ await client.product_image(
+ input_image="input_image",
+ text_prompt="text_prompt",
+ )
asyncio.run(main())
@@ -4129,11 +7932,33 @@ async def main() -> None:
"v3/ObjectInpainting/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "mask_threshold": mask_threshold,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "sd_2_upscaling": sd2upscaling,
+ "seed": seed,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormObjectInpainting, parse_obj_as(type_=BodyAsyncFormObjectInpainting, object_=_response.json())) # type: ignore
+ return typing.cast(ObjectInpaintingPageStatusResponse, parse_obj_as(type_=ObjectInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -4160,19 +7985,74 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def portrait(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormFaceInpainting:
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ face_scale: typing.Optional[float] = None,
+ face_pos_x: typing.Optional[float] = None,
+ face_pos_y: typing.Optional[float] = None,
+ selected_model: typing.Optional[PortraitRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ upscale_factor: typing.Optional[float] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> FaceInpaintingPageStatusResponse:
"""
Parameters
----------
+ input_image : str
+
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ face_scale : typing.Optional[float]
+
+ face_pos_x : typing.Optional[float]
+
+ face_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[PortraitRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ upscale_factor : typing.Optional[float]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormFaceInpainting
+ FaceInpaintingPageStatusResponse
Successful Response
Examples
@@ -4187,7 +8067,10 @@ async def portrait(
async def main() -> None:
- await client.portrait()
+ await client.portrait(
+ input_image="input_image",
+ text_prompt="text_prompt",
+ )
asyncio.run(main())
@@ -4196,11 +8079,32 @@ async def main() -> None:
"v3/FaceInpainting/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "face_scale": face_scale,
+ "face_pos_x": face_pos_x,
+ "face_pos_y": face_pos_y,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "upscale_factor": upscale_factor,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "seed": seed,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormFaceInpainting, parse_obj_as(type_=BodyAsyncFormFaceInpainting, object_=_response.json())) # type: ignore
+ return typing.cast(FaceInpaintingPageStatusResponse, parse_obj_as(type_=FaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -4227,19 +8131,101 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def image_from_email(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormEmailFaceInpainting:
+ self,
+ *,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ email_address: typing.Optional[str] = None,
+ twitter_handle: typing.Optional[str] = None,
+ face_scale: typing.Optional[float] = None,
+ face_pos_x: typing.Optional[float] = None,
+ face_pos_y: typing.Optional[float] = None,
+ selected_model: typing.Optional[ImageFromEmailRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ upscale_factor: typing.Optional[float] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ should_send_email: typing.Optional[bool] = None,
+ email_from: typing.Optional[str] = None,
+ email_cc: typing.Optional[str] = None,
+ email_bcc: typing.Optional[str] = None,
+ email_subject: typing.Optional[str] = None,
+ email_body: typing.Optional[str] = None,
+ email_body_enable_html: typing.Optional[bool] = None,
+ fallback_email_body: typing.Optional[str] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> EmailFaceInpaintingPageStatusResponse:
"""
Parameters
----------
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ email_address : typing.Optional[str]
+
+ twitter_handle : typing.Optional[str]
+
+ face_scale : typing.Optional[float]
+
+ face_pos_x : typing.Optional[float]
+
+ face_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[ImageFromEmailRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ upscale_factor : typing.Optional[float]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ should_send_email : typing.Optional[bool]
+
+ email_from : typing.Optional[str]
+
+ email_cc : typing.Optional[str]
+
+ email_bcc : typing.Optional[str]
+
+ email_subject : typing.Optional[str]
+
+ email_body : typing.Optional[str]
+
+ email_body_enable_html : typing.Optional[bool]
+
+ fallback_email_body : typing.Optional[str]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormEmailFaceInpainting
+ EmailFaceInpaintingPageStatusResponse
Successful Response
Examples
@@ -4254,7 +8240,9 @@ async def image_from_email(
async def main() -> None:
- await client.image_from_email()
+ await client.image_from_email(
+ text_prompt="text_prompt",
+ )
asyncio.run(main())
@@ -4263,11 +8251,41 @@ async def main() -> None:
"v3/EmailFaceInpainting/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "email_address": email_address,
+ "twitter_handle": twitter_handle,
+ "text_prompt": text_prompt,
+ "face_scale": face_scale,
+ "face_pos_x": face_pos_x,
+ "face_pos_y": face_pos_y,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "upscale_factor": upscale_factor,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "should_send_email": should_send_email,
+ "email_from": email_from,
+ "email_cc": email_cc,
+ "email_bcc": email_bcc,
+ "email_subject": email_subject,
+ "email_body": email_body,
+ "email_body_enable_html": email_body_enable_html,
+ "fallback_email_body": fallback_email_body,
+ "seed": seed,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormEmailFaceInpainting, parse_obj_as(type_=BodyAsyncFormEmailFaceInpainting, object_=_response.json())) # type: ignore
+ return typing.cast(EmailFaceInpaintingPageStatusResponse, parse_obj_as(type_=EmailFaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -4294,19 +8312,72 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def image_from_web_search(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormGoogleImageGen:
+ self,
+ *,
+ search_query: str,
+ text_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ serp_search_location: typing.Optional[SerpSearchLocation] = None,
+ scaleserp_locations: typing.Optional[typing.List[str]] = None,
+ selected_model: typing.Optional[ImageFromWebSearchRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ prompt_strength: typing.Optional[float] = None,
+ sd2upscaling: typing.Optional[bool] = None,
+ seed: typing.Optional[int] = None,
+ image_guidance_scale: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleImageGenPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
+ text_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.List[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ selected_model : typing.Optional[ImageFromWebSearchRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ prompt_strength : typing.Optional[float]
+
+ sd2upscaling : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ image_guidance_scale : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormGoogleImageGen
+ GoogleImageGenPageStatusResponse
Successful Response
Examples
@@ -4321,7 +8392,10 @@ async def image_from_web_search(
async def main() -> None:
- await client.image_from_web_search()
+ await client.image_from_web_search(
+ search_query="search_query",
+ text_prompt="text_prompt",
+ )
asyncio.run(main())
@@ -4330,11 +8404,31 @@ async def main() -> None:
"v3/GoogleImageGen/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "search_query": search_query,
+ "text_prompt": text_prompt,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "guidance_scale": guidance_scale,
+ "prompt_strength": prompt_strength,
+ "sd_2_upscaling": sd2upscaling,
+ "seed": seed,
+ "image_guidance_scale": image_guidance_scale,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormGoogleImageGen, parse_obj_as(type_=BodyAsyncFormGoogleImageGen, object_=_response.json())) # type: ignore
+ return typing.cast(GoogleImageGenPageStatusResponse, parse_obj_as(type_=GoogleImageGenPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -4361,19 +8455,56 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def remove_background(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormImageSegmentation:
+ self,
+ *,
+ input_image: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None,
+ mask_threshold: typing.Optional[float] = None,
+ rect_persepective_transform: typing.Optional[bool] = None,
+ reflection_opacity: typing.Optional[float] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> ImageSegmentationPageStatusResponse:
"""
Parameters
----------
+ input_image : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel]
+
+ mask_threshold : typing.Optional[float]
+
+ rect_persepective_transform : typing.Optional[bool]
+
+ reflection_opacity : typing.Optional[float]
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormImageSegmentation
+ ImageSegmentationPageStatusResponse
Successful Response
Examples
@@ -4388,7 +8519,9 @@ async def remove_background(
async def main() -> None:
- await client.remove_background()
+ await client.remove_background(
+ input_image="input_image",
+ )
asyncio.run(main())
@@ -4397,11 +8530,26 @@ async def main() -> None:
"v3/ImageSegmentation/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "selected_model": selected_model,
+ "mask_threshold": mask_threshold,
+ "rect_persepective_transform": rect_persepective_transform,
+ "reflection_opacity": reflection_opacity,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormImageSegmentation, parse_obj_as(type_=BodyAsyncFormImageSegmentation, object_=_response.json())) # type: ignore
+ return typing.cast(ImageSegmentationPageStatusResponse, parse_obj_as(type_=ImageSegmentationPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -4428,19 +8576,50 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def upscale(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormCompareAiUpscalers:
+ self,
+ *,
+ scale: int,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_image: typing.Optional[str] = None,
+ input_video: typing.Optional[str] = None,
+ selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None,
+ selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareUpscalerPageStatusResponse:
"""
Parameters
----------
+ scale : int
+ The final upsampling scale of the image
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_image : typing.Optional[str]
+ Input Image
+
+ input_video : typing.Optional[str]
+ Input Video
+
+ selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]
+
+ selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormCompareAiUpscalers
+ CompareUpscalerPageStatusResponse
Successful Response
Examples
@@ -4455,7 +8634,9 @@ async def upscale(
async def main() -> None:
- await client.upscale()
+ await client.upscale(
+ scale=1,
+ )
asyncio.run(main())
@@ -4464,11 +8645,23 @@ async def main() -> None:
"v3/compare-ai-upscalers/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "input_video": input_video,
+ "scale": scale,
+ "selected_models": selected_models,
+ "selected_bg_model": selected_bg_model,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormCompareAiUpscalers, parse_obj_as(type_=BodyAsyncFormCompareAiUpscalers, object_=_response.json())) # type: ignore
+ return typing.cast(CompareUpscalerPageStatusResponse, parse_obj_as(type_=CompareUpscalerPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -4495,19 +8688,38 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def embed(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormEmbeddings:
+ self,
+ *,
+ texts: typing.List[str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ selected_model: typing.Optional[EmbedRequestSelectedModel] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> EmbeddingsPageStatusResponse:
"""
Parameters
----------
+ texts : typing.List[str]
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[EmbedRequestSelectedModel]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormEmbeddings
+ EmbeddingsPageStatusResponse
Successful Response
Examples
@@ -4522,7 +8734,9 @@ async def embed(
async def main() -> None:
- await client.embed()
+ await client.embed(
+ texts=["texts"],
+ )
asyncio.run(main())
@@ -4531,11 +8745,20 @@ async def main() -> None:
"v3/embeddings/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "texts": texts,
+ "selected_model": selected_model,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormEmbeddings, parse_obj_as(type_=BodyAsyncFormEmbeddings, object_=_response.json())) # type: ignore
+ return typing.cast(EmbeddingsPageStatusResponse, parse_obj_as(type_=EmbeddingsPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -4562,19 +8785,107 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def seo_people_also_ask_doc(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormRelatedQnaMakerDoc:
+ self,
+ *,
+ search_query: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ keyword_query: typing.Optional[SeoPeopleAlsoAskDocRequestKeywordQuery] = None,
+ documents: typing.Optional[typing.List[str]] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ doc_extract_url: typing.Optional[str] = None,
+ embedding_model: typing.Optional[SeoPeopleAlsoAskDocRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[SeoPeopleAlsoAskDocRequestSelectedModel] = None,
+ citation_style: typing.Optional[SeoPeopleAlsoAskDocRequestCitationStyle] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[SeoPeopleAlsoAskDocRequestResponseFormatType] = None,
+ serp_search_location: typing.Optional[SerpSearchLocation] = None,
+ scaleserp_locations: typing.Optional[typing.List[str]] = None,
+ serp_search_type: typing.Optional[SerpSearchType] = None,
+ scaleserp_search_field: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnADocPageStatusResponse:
"""
Parameters
----------
+ search_query : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ keyword_query : typing.Optional[SeoPeopleAlsoAskDocRequestKeywordQuery]
+
+ documents : typing.Optional[typing.List[str]]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ doc_extract_url : typing.Optional[str]
+
+ embedding_model : typing.Optional[SeoPeopleAlsoAskDocRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ selected_model : typing.Optional[SeoPeopleAlsoAskDocRequestSelectedModel]
+
+ citation_style : typing.Optional[SeoPeopleAlsoAskDocRequestCitationStyle]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[SeoPeopleAlsoAskDocRequestResponseFormatType]
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.List[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ serp_search_type : typing.Optional[SerpSearchType]
+
+ scaleserp_search_field : typing.Optional[str]
+ DEPRECATED: use `serp_search_type` instead
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormRelatedQnaMakerDoc
+ RelatedQnADocPageStatusResponse
Successful Response
Examples
@@ -4589,7 +8900,9 @@ async def seo_people_also_ask_doc(
async def main() -> None:
- await client.seo_people_also_ask_doc()
+ await client.seo_people_also_ask_doc(
+ search_query="search_query",
+ )
asyncio.run(main())
@@ -4598,11 +8911,41 @@ async def main() -> None:
"v3/related-qna-maker-doc/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "search_query": search_query,
+ "keyword_query": keyword_query,
+ "documents": documents,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "doc_extract_url": doc_extract_url,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "selected_model": selected_model,
+ "citation_style": citation_style,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "serp_search_type": serp_search_type,
+ "scaleserp_search_field": scaleserp_search_field,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormRelatedQnaMakerDoc, parse_obj_as(type_=BodyAsyncFormRelatedQnaMakerDoc, object_=_response.json())) # type: ignore
+ return typing.cast(RelatedQnADocPageStatusResponse, parse_obj_as(type_=RelatedQnADocPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -4668,9 +9011,7 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3chyron_plant_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> ChyronPlantPageResponse:
+ async def post_v3chyron_plant_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -4679,7 +9020,7 @@ async def post_v3chyron_plant_async(
Returns
-------
- ChyronPlantPageResponse
+ typing.Any
Successful Response
Examples
@@ -4704,15 +9045,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(ChyronPlantPageResponse, parse_obj_as(type_=ChyronPlantPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3compare_llm_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> CompareLlmPageResponse:
+ async def post_v3compare_llm_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -4721,7 +9060,7 @@ async def post_v3compare_llm_async(
Returns
-------
- CompareLlmPageResponse
+ typing.Any
Successful Response
Examples
@@ -4746,7 +9085,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareLlmPageResponse, parse_obj_as(type_=CompareLlmPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -4754,7 +9093,7 @@ async def main() -> None:
async def post_v3compare_text2img_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> CompareText2ImgPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -4763,7 +9102,7 @@ async def post_v3compare_text2img_async(
Returns
-------
- CompareText2ImgPageResponse
+ typing.Any
Successful Response
Examples
@@ -4788,15 +9127,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareText2ImgPageResponse, parse_obj_as(type_=CompareText2ImgPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3deforum_sd_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> DeforumSdPageResponse:
+ async def post_v3deforum_sd_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -4805,7 +9142,7 @@ async def post_v3deforum_sd_async(
Returns
-------
- DeforumSdPageResponse
+ typing.Any
Successful Response
Examples
@@ -4830,7 +9167,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DeforumSdPageResponse, parse_obj_as(type_=DeforumSdPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -4838,7 +9175,7 @@ async def main() -> None:
async def post_v3email_face_inpainting_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> EmailFaceInpaintingPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -4847,7 +9184,7 @@ async def post_v3email_face_inpainting_async(
Returns
-------
- EmailFaceInpaintingPageResponse
+ typing.Any
Successful Response
Examples
@@ -4872,7 +9209,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(EmailFaceInpaintingPageResponse, parse_obj_as(type_=EmailFaceInpaintingPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -4880,7 +9217,7 @@ async def main() -> None:
async def post_v3face_inpainting_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> FaceInpaintingPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -4889,7 +9226,7 @@ async def post_v3face_inpainting_async(
Returns
-------
- FaceInpaintingPageResponse
+ typing.Any
Successful Response
Examples
@@ -4914,7 +9251,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(FaceInpaintingPageResponse, parse_obj_as(type_=FaceInpaintingPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -4922,7 +9259,7 @@ async def main() -> None:
async def post_v3google_image_gen_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> GoogleImageGenPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -4931,7 +9268,7 @@ async def post_v3google_image_gen_async(
Returns
-------
- GoogleImageGenPageResponse
+ typing.Any
Successful Response
Examples
@@ -4956,7 +9293,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(GoogleImageGenPageResponse, parse_obj_as(type_=GoogleImageGenPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -4964,7 +9301,7 @@ async def main() -> None:
async def post_v3image_segmentation_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> ImageSegmentationPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -4973,7 +9310,7 @@ async def post_v3image_segmentation_async(
Returns
-------
- ImageSegmentationPageResponse
+ typing.Any
Successful Response
Examples
@@ -4998,15 +9335,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(ImageSegmentationPageResponse, parse_obj_as(type_=ImageSegmentationPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3img2img_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> Img2ImgPageResponse:
+ async def post_v3img2img_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5015,7 +9350,7 @@ async def post_v3img2img_async(
Returns
-------
- Img2ImgPageResponse
+ typing.Any
Successful Response
Examples
@@ -5040,7 +9375,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(Img2ImgPageResponse, parse_obj_as(type_=Img2ImgPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -5048,7 +9383,7 @@ async def main() -> None:
async def post_v3letter_writer_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> LetterWriterPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -5057,7 +9392,7 @@ async def post_v3letter_writer_async(
Returns
-------
- LetterWriterPageResponse
+ typing.Any
Successful Response
Examples
@@ -5082,15 +9417,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(LetterWriterPageResponse, parse_obj_as(type_=LetterWriterPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3lipsync_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> LipsyncPageResponse:
+ async def post_v3lipsync_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5099,7 +9432,7 @@ async def post_v3lipsync_async(
Returns
-------
- LipsyncPageResponse
+ typing.Any
Successful Response
Examples
@@ -5124,15 +9457,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(LipsyncPageResponse, parse_obj_as(type_=LipsyncPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3lipsync_tts_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> LipsyncTtsPageResponse:
+ async def post_v3lipsync_tts_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5141,7 +9472,7 @@ async def post_v3lipsync_tts_async(
Returns
-------
- LipsyncTtsPageResponse
+ typing.Any
Successful Response
Examples
@@ -5166,7 +9497,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(LipsyncTtsPageResponse, parse_obj_as(type_=LipsyncTtsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -5174,7 +9505,7 @@ async def main() -> None:
async def post_v3object_inpainting_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> ObjectInpaintingPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -5183,7 +9514,7 @@ async def post_v3object_inpainting_async(
Returns
-------
- ObjectInpaintingPageResponse
+ typing.Any
Successful Response
Examples
@@ -5208,15 +9539,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(ObjectInpaintingPageResponse, parse_obj_as(type_=ObjectInpaintingPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3seo_summary_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> SeoSummaryPageResponse:
+ async def post_v3seo_summary_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5225,7 +9554,7 @@ async def post_v3seo_summary_async(
Returns
-------
- SeoSummaryPageResponse
+ typing.Any
Successful Response
Examples
@@ -5250,15 +9579,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SeoSummaryPageResponse, parse_obj_as(type_=SeoSummaryPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3smart_gpt_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> SmartGptPageResponse:
+ async def post_v3smart_gpt_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5267,7 +9594,7 @@ async def post_v3smart_gpt_async(
Returns
-------
- SmartGptPageResponse
+ typing.Any
Successful Response
Examples
@@ -5292,7 +9619,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SmartGptPageResponse, parse_obj_as(type_=SmartGptPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -5300,7 +9627,7 @@ async def main() -> None:
async def post_v3social_lookup_email_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> SocialLookupEmailPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -5309,7 +9636,7 @@ async def post_v3social_lookup_email_async(
Returns
-------
- SocialLookupEmailPageResponse
+ typing.Any
Successful Response
Examples
@@ -5334,7 +9661,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(SocialLookupEmailPageResponse, parse_obj_as(type_=SocialLookupEmailPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -5342,7 +9669,7 @@ async def main() -> None:
async def post_v3text_to_speech_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> TextToSpeechPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -5351,7 +9678,7 @@ async def post_v3text_to_speech_async(
Returns
-------
- TextToSpeechPageResponse
+ typing.Any
Successful Response
Examples
@@ -5376,15 +9703,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(TextToSpeechPageResponse, parse_obj_as(type_=TextToSpeechPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3art_qr_code_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> QrCodeGeneratorPageResponse:
+ async def post_v3art_qr_code_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5393,7 +9718,7 @@ async def post_v3art_qr_code_async(
Returns
-------
- QrCodeGeneratorPageResponse
+ typing.Any
Successful Response
Examples
@@ -5418,13 +9743,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(QrCodeGeneratorPageResponse, parse_obj_as(type_=QrCodeGeneratorPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> AsrPageResponse:
+ async def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5433,7 +9758,7 @@ async def post_v3asr_async(self, *, request_options: typing.Optional[RequestOpti
Returns
-------
- AsrPageResponse
+ typing.Any
Successful Response
Examples
@@ -5458,15 +9783,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(AsrPageResponse, parse_obj_as(type_=AsrPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3bulk_eval_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> BulkEvalPageResponse:
+ async def post_v3bulk_eval_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5475,7 +9798,7 @@ async def post_v3bulk_eval_async(
Returns
-------
- BulkEvalPageResponse
+ typing.Any
Successful Response
Examples
@@ -5500,15 +9823,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BulkEvalPageResponse, parse_obj_as(type_=BulkEvalPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3bulk_runner_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> BulkRunnerPageResponse:
+ async def post_v3bulk_runner_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5517,7 +9838,7 @@ async def post_v3bulk_runner_async(
Returns
-------
- BulkRunnerPageResponse
+ typing.Any
Successful Response
Examples
@@ -5542,7 +9863,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BulkRunnerPageResponse, parse_obj_as(type_=BulkRunnerPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -5550,7 +9871,7 @@ async def main() -> None:
async def post_v3compare_ai_upscalers_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> CompareUpscalerPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -5559,7 +9880,7 @@ async def post_v3compare_ai_upscalers_async(
Returns
-------
- CompareUpscalerPageResponse
+ typing.Any
Successful Response
Examples
@@ -5584,15 +9905,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(CompareUpscalerPageResponse, parse_obj_as(type_=CompareUpscalerPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3doc_extract_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> DocExtractPageResponse:
+ async def post_v3doc_extract_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5601,7 +9920,7 @@ async def post_v3doc_extract_async(
Returns
-------
- DocExtractPageResponse
+ typing.Any
Successful Response
Examples
@@ -5626,15 +9945,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocExtractPageResponse, parse_obj_as(type_=DocExtractPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3doc_search_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> DocSearchPageResponse:
+ async def post_v3doc_search_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5643,7 +9960,7 @@ async def post_v3doc_search_async(
Returns
-------
- DocSearchPageResponse
+ typing.Any
Successful Response
Examples
@@ -5668,15 +9985,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocSearchPageResponse, parse_obj_as(type_=DocSearchPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3doc_summary_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> DocSummaryPageResponse:
+ async def post_v3doc_summary_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5685,7 +10000,7 @@ async def post_v3doc_summary_async(
Returns
-------
- DocSummaryPageResponse
+ typing.Any
Successful Response
Examples
@@ -5710,15 +10025,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(DocSummaryPageResponse, parse_obj_as(type_=DocSummaryPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3embeddings_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> EmbeddingsPageResponse:
+ async def post_v3embeddings_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5727,7 +10040,7 @@ async def post_v3embeddings_async(
Returns
-------
- EmbeddingsPageResponse
+ typing.Any
Successful Response
Examples
@@ -5752,15 +10065,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(EmbeddingsPageResponse, parse_obj_as(type_=EmbeddingsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3functions_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> FunctionsPageResponse:
+ async def post_v3functions_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5769,7 +10080,7 @@ async def post_v3functions_async(
Returns
-------
- FunctionsPageResponse
+ typing.Any
Successful Response
Examples
@@ -5794,15 +10105,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(FunctionsPageResponse, parse_obj_as(type_=FunctionsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3google_gpt_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> GoogleGptPageResponse:
+ async def post_v3google_gpt_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5811,7 +10120,7 @@ async def post_v3google_gpt_async(
Returns
-------
- GoogleGptPageResponse
+ typing.Any
Successful Response
Examples
@@ -5836,7 +10145,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(GoogleGptPageResponse, parse_obj_as(type_=GoogleGptPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -5844,7 +10153,7 @@ async def main() -> None:
async def post_v3related_qna_maker_doc_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> RelatedQnADocPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -5853,7 +10162,7 @@ async def post_v3related_qna_maker_doc_async(
Returns
-------
- RelatedQnADocPageResponse
+ typing.Any
Successful Response
Examples
@@ -5878,7 +10187,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(RelatedQnADocPageResponse, parse_obj_as(type_=RelatedQnADocPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -5886,7 +10195,7 @@ async def main() -> None:
async def post_v3related_qna_maker_async(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> RelatedQnAPageResponse:
+ ) -> typing.Any:
"""
Parameters
----------
@@ -5895,7 +10204,7 @@ async def post_v3related_qna_maker_async(
Returns
-------
- RelatedQnAPageResponse
+ typing.Any
Successful Response
Examples
@@ -5920,15 +10229,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(RelatedQnAPageResponse, parse_obj_as(type_=RelatedQnAPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3text2audio_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> Text2AudioPageResponse:
+ async def post_v3text2audio_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5937,7 +10244,7 @@ async def post_v3text2audio_async(
Returns
-------
- Text2AudioPageResponse
+ typing.Any
Successful Response
Examples
@@ -5962,15 +10269,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(Text2AudioPageResponse, parse_obj_as(type_=Text2AudioPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3translate_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> TranslationPageResponse:
+ async def post_v3translate_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -5979,7 +10284,7 @@ async def post_v3translate_async(
Returns
-------
- TranslationPageResponse
+ typing.Any
Successful Response
Examples
@@ -6004,15 +10309,13 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(TranslationPageResponse, parse_obj_as(type_=TranslationPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post_v3video_bots_async(
- self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> VideoBotsPageResponse:
+ async def post_v3video_bots_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Parameters
----------
@@ -6021,7 +10324,7 @@ async def post_v3video_bots_async(
Returns
-------
- VideoBotsPageResponse
+ typing.Any
Successful Response
Examples
@@ -6046,7 +10349,7 @@ async def main() -> None:
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(VideoBotsPageResponse, parse_obj_as(type_=VideoBotsPageResponse, object_=_response.json())) # type: ignore
+ return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/compare_ai_image_generators/__init__.py b/src/gooey/compare_ai_image_generators/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/compare_ai_image_generators/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/compare_ai_image_generators/client.py b/src/gooey/compare_ai_image_generators/client.py
new file mode 100644
index 0000000..f2fb2fa
--- /dev/null
+++ b/src/gooey/compare_ai_image_generators/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.compare_text2img_page_status_response import CompareText2ImgPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class CompareAiImageGeneratorsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_compare_text2img(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareText2ImgPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CompareText2ImgPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.compare_ai_image_generators.status_compare_text2img(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/CompareText2Img/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(CompareText2ImgPageStatusResponse, parse_obj_as(type_=CompareText2ImgPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncCompareAiImageGeneratorsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_compare_text2img(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareText2ImgPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CompareText2ImgPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.compare_ai_image_generators.status_compare_text2img(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/CompareText2Img/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(CompareText2ImgPageStatusResponse, parse_obj_as(type_=CompareText2ImgPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/compare_ai_image_upscalers/__init__.py b/src/gooey/compare_ai_image_upscalers/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/compare_ai_image_upscalers/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/compare_ai_image_upscalers/client.py b/src/gooey/compare_ai_image_upscalers/client.py
new file mode 100644
index 0000000..259bad1
--- /dev/null
+++ b/src/gooey/compare_ai_image_upscalers/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class CompareAiImageUpscalersClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_compare_ai_upscalers(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareUpscalerPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CompareUpscalerPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.compare_ai_image_upscalers.status_compare_ai_upscalers(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/compare-ai-upscalers/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(CompareUpscalerPageStatusResponse, parse_obj_as(type_=CompareUpscalerPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncCompareAiImageUpscalersClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_compare_ai_upscalers(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareUpscalerPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CompareUpscalerPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.compare_ai_image_upscalers.status_compare_ai_upscalers(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/compare-ai-upscalers/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(CompareUpscalerPageStatusResponse, parse_obj_as(type_=CompareUpscalerPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/compare_ai_translations/__init__.py b/src/gooey/compare_ai_translations/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/compare_ai_translations/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/compare_ai_translations/client.py b/src/gooey/compare_ai_translations/client.py
new file mode 100644
index 0000000..54852b0
--- /dev/null
+++ b/src/gooey/compare_ai_translations/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.translation_page_status_response import TranslationPageStatusResponse
+
+
+class CompareAiTranslationsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_translate(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> TranslationPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ TranslationPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.compare_ai_translations.status_translate(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/translate/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(TranslationPageStatusResponse, parse_obj_as(type_=TranslationPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncCompareAiTranslationsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_translate(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> TranslationPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ TranslationPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.compare_ai_translations.status_translate(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/translate/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(TranslationPageStatusResponse, parse_obj_as(type_=TranslationPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/compare_ai_voice_generators/__init__.py b/src/gooey/compare_ai_voice_generators/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/compare_ai_voice_generators/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/compare_ai_voice_generators/client.py b/src/gooey/compare_ai_voice_generators/client.py
new file mode 100644
index 0000000..6b0f88c
--- /dev/null
+++ b/src/gooey/compare_ai_voice_generators/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.text_to_speech_page_status_response import TextToSpeechPageStatusResponse
+
+
+class CompareAiVoiceGeneratorsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_text_to_speech(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> TextToSpeechPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ TextToSpeechPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.compare_ai_voice_generators.status_text_to_speech(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/TextToSpeech/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(TextToSpeechPageStatusResponse, parse_obj_as(type_=TextToSpeechPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncCompareAiVoiceGeneratorsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_text_to_speech(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> TextToSpeechPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ TextToSpeechPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.compare_ai_voice_generators.status_text_to_speech(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/TextToSpeech/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(TextToSpeechPageStatusResponse, parse_obj_as(type_=TextToSpeechPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/copilot_for_your_enterprise/__init__.py b/src/gooey/copilot_for_your_enterprise/__init__.py
index f3ea265..28df997 100644
--- a/src/gooey/copilot_for_your_enterprise/__init__.py
+++ b/src/gooey/copilot_for_your_enterprise/__init__.py
@@ -1,2 +1,27 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import (
+ AsyncFormVideoBotsRequestAsrModel,
+ AsyncFormVideoBotsRequestCitationStyle,
+ AsyncFormVideoBotsRequestEmbeddingModel,
+ AsyncFormVideoBotsRequestLipsyncModel,
+ AsyncFormVideoBotsRequestOpenaiTtsModel,
+ AsyncFormVideoBotsRequestOpenaiVoiceName,
+ AsyncFormVideoBotsRequestResponseFormatType,
+ AsyncFormVideoBotsRequestSelectedModel,
+ AsyncFormVideoBotsRequestTranslationModel,
+ AsyncFormVideoBotsRequestTtsProvider,
+)
+
+__all__ = [
+ "AsyncFormVideoBotsRequestAsrModel",
+ "AsyncFormVideoBotsRequestCitationStyle",
+ "AsyncFormVideoBotsRequestEmbeddingModel",
+ "AsyncFormVideoBotsRequestLipsyncModel",
+ "AsyncFormVideoBotsRequestOpenaiTtsModel",
+ "AsyncFormVideoBotsRequestOpenaiVoiceName",
+ "AsyncFormVideoBotsRequestResponseFormatType",
+ "AsyncFormVideoBotsRequestSelectedModel",
+ "AsyncFormVideoBotsRequestTranslationModel",
+ "AsyncFormVideoBotsRequestTtsProvider",
+]
diff --git a/src/gooey/copilot_for_your_enterprise/client.py b/src/gooey/copilot_for_your_enterprise/client.py
index 9cb701b..c5d0114 100644
--- a/src/gooey/copilot_for_your_enterprise/client.py
+++ b/src/gooey/copilot_for_your_enterprise/client.py
@@ -12,10 +12,28 @@
from ..errors.payment_required_error import PaymentRequiredError
from ..errors.too_many_requests_error import TooManyRequestsError
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.body_async_form_video_bots import BodyAsyncFormVideoBots
+from ..types.conversation_entry import ConversationEntry
from ..types.failed_reponse_model_v2 import FailedReponseModelV2
from ..types.generic_error_response import GenericErrorResponse
from ..types.http_validation_error import HttpValidationError
+from ..types.llm_tools import LlmTools
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.sad_talker_settings import SadTalkerSettings
+from ..types.video_bots_page_status_response import VideoBotsPageStatusResponse
+from .types.async_form_video_bots_request_asr_model import AsyncFormVideoBotsRequestAsrModel
+from .types.async_form_video_bots_request_citation_style import AsyncFormVideoBotsRequestCitationStyle
+from .types.async_form_video_bots_request_embedding_model import AsyncFormVideoBotsRequestEmbeddingModel
+from .types.async_form_video_bots_request_lipsync_model import AsyncFormVideoBotsRequestLipsyncModel
+from .types.async_form_video_bots_request_openai_tts_model import AsyncFormVideoBotsRequestOpenaiTtsModel
+from .types.async_form_video_bots_request_openai_voice_name import AsyncFormVideoBotsRequestOpenaiVoiceName
+from .types.async_form_video_bots_request_response_format_type import AsyncFormVideoBotsRequestResponseFormatType
+from .types.async_form_video_bots_request_selected_model import AsyncFormVideoBotsRequestSelectedModel
+from .types.async_form_video_bots_request_translation_model import AsyncFormVideoBotsRequestTranslationModel
+from .types.async_form_video_bots_request_tts_provider import AsyncFormVideoBotsRequestTtsProvider
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
class CopilotForYourEnterpriseClient:
@@ -23,19 +41,223 @@ def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
def async_form_video_bots(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormVideoBots:
+ self,
+ *,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_prompt: typing.Optional[str] = None,
+ input_audio: typing.Optional[str] = None,
+ input_images: typing.Optional[typing.List[str]] = None,
+ input_documents: typing.Optional[typing.List[str]] = None,
+ doc_extract_url: typing.Optional[str] = None,
+ messages: typing.Optional[typing.List[ConversationEntry]] = None,
+ bot_script: typing.Optional[str] = None,
+ selected_model: typing.Optional[AsyncFormVideoBotsRequestSelectedModel] = None,
+ document_model: typing.Optional[str] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ keyword_instructions: typing.Optional[str] = None,
+ documents: typing.Optional[typing.List[str]] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ embedding_model: typing.Optional[AsyncFormVideoBotsRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ citation_style: typing.Optional[AsyncFormVideoBotsRequestCitationStyle] = None,
+ use_url_shortener: typing.Optional[bool] = None,
+ asr_model: typing.Optional[AsyncFormVideoBotsRequestAsrModel] = None,
+ asr_language: typing.Optional[str] = None,
+ translation_model: typing.Optional[AsyncFormVideoBotsRequestTranslationModel] = None,
+ user_language: typing.Optional[str] = None,
+ input_glossary_document: typing.Optional[str] = None,
+ output_glossary_document: typing.Optional[str] = None,
+ lipsync_model: typing.Optional[AsyncFormVideoBotsRequestLipsyncModel] = None,
+ tools: typing.Optional[typing.List[LlmTools]] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[AsyncFormVideoBotsRequestResponseFormatType] = None,
+ tts_provider: typing.Optional[AsyncFormVideoBotsRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[AsyncFormVideoBotsRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[AsyncFormVideoBotsRequestOpenaiTtsModel] = None,
+ input_face: typing.Optional[str] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> VideoBotsPageStatusResponse:
"""
Parameters
----------
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ input_audio : typing.Optional[str]
+
+ input_images : typing.Optional[typing.List[str]]
+
+ input_documents : typing.Optional[typing.List[str]]
+
+ doc_extract_url : typing.Optional[str]
+ Select a workflow to extract text from documents and images.
+
+ messages : typing.Optional[typing.List[ConversationEntry]]
+
+ bot_script : typing.Optional[str]
+
+ selected_model : typing.Optional[AsyncFormVideoBotsRequestSelectedModel]
+
+ document_model : typing.Optional[str]
+ When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ keyword_instructions : typing.Optional[str]
+
+ documents : typing.Optional[typing.List[str]]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ embedding_model : typing.Optional[AsyncFormVideoBotsRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ citation_style : typing.Optional[AsyncFormVideoBotsRequestCitationStyle]
+
+ use_url_shortener : typing.Optional[bool]
+
+ asr_model : typing.Optional[AsyncFormVideoBotsRequestAsrModel]
+ Choose a model to transcribe incoming audio messages to text.
+
+ asr_language : typing.Optional[str]
+ Choose a language to transcribe incoming audio messages to text.
+
+ translation_model : typing.Optional[AsyncFormVideoBotsRequestTranslationModel]
+
+ user_language : typing.Optional[str]
+ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+ input_glossary_document : typing.Optional[str]
+
+ Translation Glossary for User Language -> LLM Language (English)
+
+
+ output_glossary_document : typing.Optional[str]
+
+ Translation Glossary for LLM Language (English) -> User Language
+
+
+ lipsync_model : typing.Optional[AsyncFormVideoBotsRequestLipsyncModel]
+
+ tools : typing.Optional[typing.List[LlmTools]]
+ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[AsyncFormVideoBotsRequestResponseFormatType]
+
+ tts_provider : typing.Optional[AsyncFormVideoBotsRequestTtsProvider]
+
+ uberduck_voice_name : typing.Optional[str]
+
+ uberduck_speaking_rate : typing.Optional[float]
+
+ google_voice_name : typing.Optional[str]
+
+ google_speaking_rate : typing.Optional[float]
+
+ google_pitch : typing.Optional[float]
+
+ bark_history_prompt : typing.Optional[str]
+
+ elevenlabs_voice_name : typing.Optional[str]
+ Use `elevenlabs_voice_id` instead
+
+ elevenlabs_api_key : typing.Optional[str]
+
+ elevenlabs_voice_id : typing.Optional[str]
+
+ elevenlabs_model : typing.Optional[str]
+
+ elevenlabs_stability : typing.Optional[float]
+
+ elevenlabs_similarity_boost : typing.Optional[float]
+
+ elevenlabs_style : typing.Optional[float]
+
+ elevenlabs_speaker_boost : typing.Optional[bool]
+
+ azure_voice_name : typing.Optional[str]
+
+ openai_voice_name : typing.Optional[AsyncFormVideoBotsRequestOpenaiVoiceName]
+
+ openai_tts_model : typing.Optional[AsyncFormVideoBotsRequestOpenaiTtsModel]
+
+ input_face : typing.Optional[str]
+
+ face_padding_top : typing.Optional[int]
+
+ face_padding_bottom : typing.Optional[int]
+
+ face_padding_left : typing.Optional[int]
+
+ face_padding_right : typing.Optional[int]
+
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormVideoBots
+ VideoBotsPageStatusResponse
Successful Response
Examples
@@ -51,11 +273,76 @@ def async_form_video_bots(
"v3/video-bots/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "input_audio": input_audio,
+ "input_images": input_images,
+ "input_documents": input_documents,
+ "doc_extract_url": doc_extract_url,
+ "messages": messages,
+ "bot_script": bot_script,
+ "selected_model": selected_model,
+ "document_model": document_model,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "keyword_instructions": keyword_instructions,
+ "documents": documents,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "citation_style": citation_style,
+ "use_url_shortener": use_url_shortener,
+ "asr_model": asr_model,
+ "asr_language": asr_language,
+ "translation_model": translation_model,
+ "user_language": user_language,
+ "input_glossary_document": input_glossary_document,
+ "output_glossary_document": output_glossary_document,
+ "lipsync_model": lipsync_model,
+ "tools": tools,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "tts_provider": tts_provider,
+ "uberduck_voice_name": uberduck_voice_name,
+ "uberduck_speaking_rate": uberduck_speaking_rate,
+ "google_voice_name": google_voice_name,
+ "google_speaking_rate": google_speaking_rate,
+ "google_pitch": google_pitch,
+ "bark_history_prompt": bark_history_prompt,
+ "elevenlabs_voice_name": elevenlabs_voice_name,
+ "elevenlabs_api_key": elevenlabs_api_key,
+ "elevenlabs_voice_id": elevenlabs_voice_id,
+ "elevenlabs_model": elevenlabs_model,
+ "elevenlabs_stability": elevenlabs_stability,
+ "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+ "elevenlabs_style": elevenlabs_style,
+ "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+ "azure_voice_name": azure_voice_name,
+ "openai_voice_name": openai_voice_name,
+ "openai_tts_model": openai_tts_model,
+ "input_face": input_face,
+ "face_padding_top": face_padding_top,
+ "face_padding_bottom": face_padding_bottom,
+ "face_padding_left": face_padding_left,
+ "face_padding_right": face_padding_right,
+ "sadtalker_settings": sadtalker_settings,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormVideoBots, parse_obj_as(type_=BodyAsyncFormVideoBots, object_=_response.json())) # type: ignore
+ return typing.cast(VideoBotsPageStatusResponse, parse_obj_as(type_=VideoBotsPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -81,25 +368,279 @@ def async_form_video_bots(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def status_video_bots(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> VideoBotsPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ VideoBotsPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.copilot_for_your_enterprise.status_video_bots(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/video-bots/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(VideoBotsPageStatusResponse, parse_obj_as(type_=VideoBotsPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
class AsyncCopilotForYourEnterpriseClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
async def async_form_video_bots(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormVideoBots:
+ self,
+ *,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_prompt: typing.Optional[str] = None,
+ input_audio: typing.Optional[str] = None,
+ input_images: typing.Optional[typing.List[str]] = None,
+ input_documents: typing.Optional[typing.List[str]] = None,
+ doc_extract_url: typing.Optional[str] = None,
+ messages: typing.Optional[typing.List[ConversationEntry]] = None,
+ bot_script: typing.Optional[str] = None,
+ selected_model: typing.Optional[AsyncFormVideoBotsRequestSelectedModel] = None,
+ document_model: typing.Optional[str] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ keyword_instructions: typing.Optional[str] = None,
+ documents: typing.Optional[typing.List[str]] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ embedding_model: typing.Optional[AsyncFormVideoBotsRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ citation_style: typing.Optional[AsyncFormVideoBotsRequestCitationStyle] = None,
+ use_url_shortener: typing.Optional[bool] = None,
+ asr_model: typing.Optional[AsyncFormVideoBotsRequestAsrModel] = None,
+ asr_language: typing.Optional[str] = None,
+ translation_model: typing.Optional[AsyncFormVideoBotsRequestTranslationModel] = None,
+ user_language: typing.Optional[str] = None,
+ input_glossary_document: typing.Optional[str] = None,
+ output_glossary_document: typing.Optional[str] = None,
+ lipsync_model: typing.Optional[AsyncFormVideoBotsRequestLipsyncModel] = None,
+ tools: typing.Optional[typing.List[LlmTools]] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[AsyncFormVideoBotsRequestResponseFormatType] = None,
+ tts_provider: typing.Optional[AsyncFormVideoBotsRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[AsyncFormVideoBotsRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[AsyncFormVideoBotsRequestOpenaiTtsModel] = None,
+ input_face: typing.Optional[str] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> VideoBotsPageStatusResponse:
"""
Parameters
----------
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ input_audio : typing.Optional[str]
+
+ input_images : typing.Optional[typing.List[str]]
+
+ input_documents : typing.Optional[typing.List[str]]
+
+ doc_extract_url : typing.Optional[str]
+ Select a workflow to extract text from documents and images.
+
+ messages : typing.Optional[typing.List[ConversationEntry]]
+
+ bot_script : typing.Optional[str]
+
+ selected_model : typing.Optional[AsyncFormVideoBotsRequestSelectedModel]
+
+ document_model : typing.Optional[str]
+ When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ keyword_instructions : typing.Optional[str]
+
+ documents : typing.Optional[typing.List[str]]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ embedding_model : typing.Optional[AsyncFormVideoBotsRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ citation_style : typing.Optional[AsyncFormVideoBotsRequestCitationStyle]
+
+ use_url_shortener : typing.Optional[bool]
+
+ asr_model : typing.Optional[AsyncFormVideoBotsRequestAsrModel]
+ Choose a model to transcribe incoming audio messages to text.
+
+ asr_language : typing.Optional[str]
+ Choose a language to transcribe incoming audio messages to text.
+
+ translation_model : typing.Optional[AsyncFormVideoBotsRequestTranslationModel]
+
+ user_language : typing.Optional[str]
+ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+ input_glossary_document : typing.Optional[str]
+
+ Translation Glossary for User Language -> LLM Language (English)
+
+
+ output_glossary_document : typing.Optional[str]
+
+ Translation Glossary for LLM Language (English) -> User Language
+
+
+ lipsync_model : typing.Optional[AsyncFormVideoBotsRequestLipsyncModel]
+
+ tools : typing.Optional[typing.List[LlmTools]]
+ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[AsyncFormVideoBotsRequestResponseFormatType]
+
+ tts_provider : typing.Optional[AsyncFormVideoBotsRequestTtsProvider]
+
+ uberduck_voice_name : typing.Optional[str]
+
+ uberduck_speaking_rate : typing.Optional[float]
+
+ google_voice_name : typing.Optional[str]
+
+ google_speaking_rate : typing.Optional[float]
+
+ google_pitch : typing.Optional[float]
+
+ bark_history_prompt : typing.Optional[str]
+
+ elevenlabs_voice_name : typing.Optional[str]
+ Use `elevenlabs_voice_id` instead
+
+ elevenlabs_api_key : typing.Optional[str]
+
+ elevenlabs_voice_id : typing.Optional[str]
+
+ elevenlabs_model : typing.Optional[str]
+
+ elevenlabs_stability : typing.Optional[float]
+
+ elevenlabs_similarity_boost : typing.Optional[float]
+
+ elevenlabs_style : typing.Optional[float]
+
+ elevenlabs_speaker_boost : typing.Optional[bool]
+
+ azure_voice_name : typing.Optional[str]
+
+ openai_voice_name : typing.Optional[AsyncFormVideoBotsRequestOpenaiVoiceName]
+
+ openai_tts_model : typing.Optional[AsyncFormVideoBotsRequestOpenaiTtsModel]
+
+ input_face : typing.Optional[str]
+
+ face_padding_top : typing.Optional[int]
+
+ face_padding_bottom : typing.Optional[int]
+
+ face_padding_left : typing.Optional[int]
+
+ face_padding_right : typing.Optional[int]
+
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormVideoBots
+ VideoBotsPageStatusResponse
Successful Response
Examples
@@ -123,11 +664,76 @@ async def main() -> None:
"v3/video-bots/async/form",
method="POST",
params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "input_audio": input_audio,
+ "input_images": input_images,
+ "input_documents": input_documents,
+ "doc_extract_url": doc_extract_url,
+ "messages": messages,
+ "bot_script": bot_script,
+ "selected_model": selected_model,
+ "document_model": document_model,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "keyword_instructions": keyword_instructions,
+ "documents": documents,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "citation_style": citation_style,
+ "use_url_shortener": use_url_shortener,
+ "asr_model": asr_model,
+ "asr_language": asr_language,
+ "translation_model": translation_model,
+ "user_language": user_language,
+ "input_glossary_document": input_glossary_document,
+ "output_glossary_document": output_glossary_document,
+ "lipsync_model": lipsync_model,
+ "tools": tools,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "tts_provider": tts_provider,
+ "uberduck_voice_name": uberduck_voice_name,
+ "uberduck_speaking_rate": uberduck_speaking_rate,
+ "google_voice_name": google_voice_name,
+ "google_speaking_rate": google_speaking_rate,
+ "google_pitch": google_pitch,
+ "bark_history_prompt": bark_history_prompt,
+ "elevenlabs_voice_name": elevenlabs_voice_name,
+ "elevenlabs_api_key": elevenlabs_api_key,
+ "elevenlabs_voice_id": elevenlabs_voice_id,
+ "elevenlabs_model": elevenlabs_model,
+ "elevenlabs_stability": elevenlabs_stability,
+ "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+ "elevenlabs_style": elevenlabs_style,
+ "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+ "azure_voice_name": azure_voice_name,
+ "openai_voice_name": openai_voice_name,
+ "openai_tts_model": openai_tts_model,
+ "input_face": input_face,
+ "face_padding_top": face_padding_top,
+ "face_padding_bottom": face_padding_bottom,
+ "face_padding_left": face_padding_left,
+ "face_padding_right": face_padding_right,
+ "sadtalker_settings": sadtalker_settings,
+ "settings": settings,
+ },
+ files={},
request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormVideoBots, parse_obj_as(type_=BodyAsyncFormVideoBots, object_=_response.json())) # type: ignore
+ return typing.cast(VideoBotsPageStatusResponse, parse_obj_as(type_=VideoBotsPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -152,3 +758,61 @@ async def main() -> None:
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_video_bots(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> VideoBotsPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ VideoBotsPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.copilot_for_your_enterprise.status_video_bots(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/video-bots/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(VideoBotsPageStatusResponse, parse_obj_as(type_=VideoBotsPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/copilot_for_your_enterprise/types/__init__.py b/src/gooey/copilot_for_your_enterprise/types/__init__.py
new file mode 100644
index 0000000..a638966
--- /dev/null
+++ b/src/gooey/copilot_for_your_enterprise/types/__init__.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .async_form_video_bots_request_asr_model import AsyncFormVideoBotsRequestAsrModel
+from .async_form_video_bots_request_citation_style import AsyncFormVideoBotsRequestCitationStyle
+from .async_form_video_bots_request_embedding_model import AsyncFormVideoBotsRequestEmbeddingModel
+from .async_form_video_bots_request_lipsync_model import AsyncFormVideoBotsRequestLipsyncModel
+from .async_form_video_bots_request_openai_tts_model import AsyncFormVideoBotsRequestOpenaiTtsModel
+from .async_form_video_bots_request_openai_voice_name import AsyncFormVideoBotsRequestOpenaiVoiceName
+from .async_form_video_bots_request_response_format_type import AsyncFormVideoBotsRequestResponseFormatType
+from .async_form_video_bots_request_selected_model import AsyncFormVideoBotsRequestSelectedModel
+from .async_form_video_bots_request_translation_model import AsyncFormVideoBotsRequestTranslationModel
+from .async_form_video_bots_request_tts_provider import AsyncFormVideoBotsRequestTtsProvider
+
+__all__ = [
+ "AsyncFormVideoBotsRequestAsrModel",
+ "AsyncFormVideoBotsRequestCitationStyle",
+ "AsyncFormVideoBotsRequestEmbeddingModel",
+ "AsyncFormVideoBotsRequestLipsyncModel",
+ "AsyncFormVideoBotsRequestOpenaiTtsModel",
+ "AsyncFormVideoBotsRequestOpenaiVoiceName",
+ "AsyncFormVideoBotsRequestResponseFormatType",
+ "AsyncFormVideoBotsRequestSelectedModel",
+ "AsyncFormVideoBotsRequestTranslationModel",
+ "AsyncFormVideoBotsRequestTtsProvider",
+]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_asr_model.py
similarity index 90%
rename from src/gooey/copilot_integrations/types/create_stream_request_asr_model.py
rename to src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_asr_model.py
index af166fa..6fb72ad 100644
--- a/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py
+++ b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_asr_model.py
@@ -2,7 +2,7 @@
import typing
-CreateStreamRequestAsrModel = typing.Union[
+AsyncFormVideoBotsRequestAsrModel = typing.Union[
typing.Literal[
"whisper_large_v2",
"whisper_large_v3",
diff --git a/src/gooey/types/doc_search_page_request_citation_style.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_citation_style.py
similarity index 89%
rename from src/gooey/types/doc_search_page_request_citation_style.py
rename to src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_citation_style.py
index b47b3be..340070c 100644
--- a/src/gooey/types/doc_search_page_request_citation_style.py
+++ b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_citation_style.py
@@ -2,7 +2,7 @@
import typing
-DocSearchPageRequestCitationStyle = typing.Union[
+AsyncFormVideoBotsRequestCitationStyle = typing.Union[
typing.Literal[
"number",
"title",
diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_embedding_model.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_embedding_model.py
new file mode 100644
index 0000000..f66aed4
--- /dev/null
+++ b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_embedding_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsyncFormVideoBotsRequestEmbeddingModel = typing.Union[
+ typing.Literal[
+ "openai_3_large",
+ "openai_3_small",
+ "openai_ada_2",
+ "e5_large_v2",
+ "e5_base_v2",
+ "multilingual_e5_base",
+ "multilingual_e5_large",
+ "gte_large",
+ "gte_base",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_lipsync_model.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_lipsync_model.py
new file mode 100644
index 0000000..88f876c
--- /dev/null
+++ b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_lipsync_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsyncFormVideoBotsRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_tts_model.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_tts_model.py
new file mode 100644
index 0000000..66ac856
--- /dev/null
+++ b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_tts_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsyncFormVideoBotsRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/text_to_speech_page_request_openai_voice_name.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_voice_name.py
similarity index 74%
rename from src/gooey/types/text_to_speech_page_request_openai_voice_name.py
rename to src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_voice_name.py
index efd862f..59f2cc3 100644
--- a/src/gooey/types/text_to_speech_page_request_openai_voice_name.py
+++ b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_voice_name.py
@@ -2,6 +2,6 @@
import typing
-TextToSpeechPageRequestOpenaiVoiceName = typing.Union[
+AsyncFormVideoBotsRequestOpenaiVoiceName = typing.Union[
typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
]
diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_response_format_type.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_response_format_type.py
new file mode 100644
index 0000000..fa42a29
--- /dev/null
+++ b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsyncFormVideoBotsRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_selected_model.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_selected_model.py
new file mode 100644
index 0000000..72d3fcd
--- /dev/null
+++ b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_selected_model.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsyncFormVideoBotsRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_o_mini",
+ "chatgpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama_3_groq_70b_tool_use",
+ "llama3_8b",
+ "llama_3_groq_8b_tool_use",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_2_9b_it",
+ "gemma_7b_it",
+ "gemini_1_5_flash",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "llama3_8b_cpt_sea_lion_v2_instruct",
+ "sarvam_2b",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_translation_model.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_translation_model.py
new file mode 100644
index 0000000..38d5296
--- /dev/null
+++ b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_translation_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsyncFormVideoBotsRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_page_request_tts_provider.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_tts_provider.py
similarity index 77%
rename from src/gooey/types/lipsync_tts_page_request_tts_provider.py
rename to src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_tts_provider.py
index 7e73fda..4142fc5 100644
--- a/src/gooey/types/lipsync_tts_page_request_tts_provider.py
+++ b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_tts_provider.py
@@ -2,6 +2,6 @@
import typing
-LipsyncTtsPageRequestTtsProvider = typing.Union[
+AsyncFormVideoBotsRequestTtsProvider = typing.Union[
typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
]
diff --git a/src/gooey/copilot_integrations/__init__.py b/src/gooey/copilot_integrations/__init__.py
index 8d66257..87847bb 100644
--- a/src/gooey/copilot_integrations/__init__.py
+++ b/src/gooey/copilot_integrations/__init__.py
@@ -1,29 +1,29 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
- CreateStreamRequestAsrModel,
- CreateStreamRequestCitationStyle,
- CreateStreamRequestEmbeddingModel,
- CreateStreamRequestLipsyncModel,
- CreateStreamRequestOpenaiTtsModel,
- CreateStreamRequestOpenaiVoiceName,
- CreateStreamRequestResponseFormatType,
- CreateStreamRequestSelectedModel,
- CreateStreamRequestTranslationModel,
- CreateStreamRequestTtsProvider,
+ VideoBotsStreamCreateRequestAsrModel,
+ VideoBotsStreamCreateRequestCitationStyle,
+ VideoBotsStreamCreateRequestEmbeddingModel,
+ VideoBotsStreamCreateRequestLipsyncModel,
+ VideoBotsStreamCreateRequestOpenaiTtsModel,
+ VideoBotsStreamCreateRequestOpenaiVoiceName,
+ VideoBotsStreamCreateRequestResponseFormatType,
+ VideoBotsStreamCreateRequestSelectedModel,
+ VideoBotsStreamCreateRequestTranslationModel,
+ VideoBotsStreamCreateRequestTtsProvider,
VideoBotsStreamResponse,
)
__all__ = [
- "CreateStreamRequestAsrModel",
- "CreateStreamRequestCitationStyle",
- "CreateStreamRequestEmbeddingModel",
- "CreateStreamRequestLipsyncModel",
- "CreateStreamRequestOpenaiTtsModel",
- "CreateStreamRequestOpenaiVoiceName",
- "CreateStreamRequestResponseFormatType",
- "CreateStreamRequestSelectedModel",
- "CreateStreamRequestTranslationModel",
- "CreateStreamRequestTtsProvider",
+ "VideoBotsStreamCreateRequestAsrModel",
+ "VideoBotsStreamCreateRequestCitationStyle",
+ "VideoBotsStreamCreateRequestEmbeddingModel",
+ "VideoBotsStreamCreateRequestLipsyncModel",
+ "VideoBotsStreamCreateRequestOpenaiTtsModel",
+ "VideoBotsStreamCreateRequestOpenaiVoiceName",
+ "VideoBotsStreamCreateRequestResponseFormatType",
+ "VideoBotsStreamCreateRequestSelectedModel",
+ "VideoBotsStreamCreateRequestTranslationModel",
+ "VideoBotsStreamCreateRequestTtsProvider",
"VideoBotsStreamResponse",
]
diff --git a/src/gooey/copilot_integrations/client.py b/src/gooey/copilot_integrations/client.py
index abc570c..0feaff7 100644
--- a/src/gooey/copilot_integrations/client.py
+++ b/src/gooey/copilot_integrations/client.py
@@ -17,16 +17,16 @@
from ..types.llm_tools import LlmTools
from ..types.recipe_function import RecipeFunction
from ..types.sad_talker_settings import SadTalkerSettings
-from .types.create_stream_request_asr_model import CreateStreamRequestAsrModel
-from .types.create_stream_request_citation_style import CreateStreamRequestCitationStyle
-from .types.create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel
-from .types.create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel
-from .types.create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel
-from .types.create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName
-from .types.create_stream_request_response_format_type import CreateStreamRequestResponseFormatType
-from .types.create_stream_request_selected_model import CreateStreamRequestSelectedModel
-from .types.create_stream_request_translation_model import CreateStreamRequestTranslationModel
-from .types.create_stream_request_tts_provider import CreateStreamRequestTtsProvider
+from .types.video_bots_stream_create_request_asr_model import VideoBotsStreamCreateRequestAsrModel
+from .types.video_bots_stream_create_request_citation_style import VideoBotsStreamCreateRequestCitationStyle
+from .types.video_bots_stream_create_request_embedding_model import VideoBotsStreamCreateRequestEmbeddingModel
+from .types.video_bots_stream_create_request_lipsync_model import VideoBotsStreamCreateRequestLipsyncModel
+from .types.video_bots_stream_create_request_openai_tts_model import VideoBotsStreamCreateRequestOpenaiTtsModel
+from .types.video_bots_stream_create_request_openai_voice_name import VideoBotsStreamCreateRequestOpenaiVoiceName
+from .types.video_bots_stream_create_request_response_format_type import VideoBotsStreamCreateRequestResponseFormatType
+from .types.video_bots_stream_create_request_selected_model import VideoBotsStreamCreateRequestSelectedModel
+from .types.video_bots_stream_create_request_translation_model import VideoBotsStreamCreateRequestTranslationModel
+from .types.video_bots_stream_create_request_tts_provider import VideoBotsStreamCreateRequestTtsProvider
from .types.video_bots_stream_response import VideoBotsStreamResponse
# this is used as the default value for optional parameters
@@ -41,71 +41,71 @@ def video_bots_stream_create(
self,
*,
integration_id: str,
- conversation_id: typing.Optional[str] = OMIT,
- user_id: typing.Optional[str] = OMIT,
- user_message_id: typing.Optional[str] = OMIT,
- button_pressed: typing.Optional[ButtonPressed] = OMIT,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_prompt: typing.Optional[str] = OMIT,
- input_audio: typing.Optional[str] = OMIT,
- input_images: typing.Optional[typing.Sequence[str]] = OMIT,
- input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
- doc_extract_url: typing.Optional[str] = OMIT,
- messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
- bot_script: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[CreateStreamRequestSelectedModel] = OMIT,
- document_model: typing.Optional[str] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- keyword_instructions: typing.Optional[str] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- citation_style: typing.Optional[CreateStreamRequestCitationStyle] = OMIT,
- use_url_shortener: typing.Optional[bool] = OMIT,
- asr_model: typing.Optional[CreateStreamRequestAsrModel] = OMIT,
- asr_language: typing.Optional[str] = OMIT,
- translation_model: typing.Optional[CreateStreamRequestTranslationModel] = OMIT,
- user_language: typing.Optional[str] = OMIT,
- input_glossary_document: typing.Optional[str] = OMIT,
- output_glossary_document: typing.Optional[str] = OMIT,
- lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = OMIT,
- tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = OMIT,
- tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[CreateStreamRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[CreateStreamRequestOpenaiTtsModel] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- input_text: typing.Optional[str] = OMIT,
+ conversation_id: typing.Optional[str] = None,
+ user_id: typing.Optional[str] = None,
+ user_message_id: typing.Optional[str] = None,
+ button_pressed: typing.Optional[ButtonPressed] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_prompt: typing.Optional[str] = None,
+ input_audio: typing.Optional[str] = None,
+ input_images: typing.Optional[typing.List[str]] = None,
+ input_documents: typing.Optional[typing.List[str]] = None,
+ doc_extract_url: typing.Optional[str] = None,
+ messages: typing.Optional[typing.List[ConversationEntry]] = None,
+ bot_script: typing.Optional[str] = None,
+ selected_model: typing.Optional[VideoBotsStreamCreateRequestSelectedModel] = None,
+ document_model: typing.Optional[str] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ keyword_instructions: typing.Optional[str] = None,
+ documents: typing.Optional[typing.List[str]] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ embedding_model: typing.Optional[VideoBotsStreamCreateRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ citation_style: typing.Optional[VideoBotsStreamCreateRequestCitationStyle] = None,
+ use_url_shortener: typing.Optional[bool] = None,
+ asr_model: typing.Optional[VideoBotsStreamCreateRequestAsrModel] = None,
+ asr_language: typing.Optional[str] = None,
+ translation_model: typing.Optional[VideoBotsStreamCreateRequestTranslationModel] = None,
+ user_language: typing.Optional[str] = None,
+ input_glossary_document: typing.Optional[str] = None,
+ output_glossary_document: typing.Optional[str] = None,
+ lipsync_model: typing.Optional[VideoBotsStreamCreateRequestLipsyncModel] = None,
+ tools: typing.Optional[typing.List[LlmTools]] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[VideoBotsStreamCreateRequestResponseFormatType] = None,
+ tts_provider: typing.Optional[VideoBotsStreamCreateRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[VideoBotsStreamCreateRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[VideoBotsStreamCreateRequestOpenaiTtsModel] = None,
+ input_face: typing.Optional[str] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ input_text: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateStreamResponse:
"""
@@ -134,7 +134,7 @@ def video_bots_stream_create(
button_pressed : typing.Optional[ButtonPressed]
The button that was pressed by the user.
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Any]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -143,18 +143,18 @@ def video_bots_stream_create(
input_audio : typing.Optional[str]
- input_images : typing.Optional[typing.Sequence[str]]
+ input_images : typing.Optional[typing.List[str]]
- input_documents : typing.Optional[typing.Sequence[str]]
+ input_documents : typing.Optional[typing.List[str]]
doc_extract_url : typing.Optional[str]
Select a workflow to extract text from documents and images.
- messages : typing.Optional[typing.Sequence[ConversationEntry]]
+ messages : typing.Optional[typing.List[ConversationEntry]]
bot_script : typing.Optional[str]
- selected_model : typing.Optional[CreateStreamRequestSelectedModel]
+ selected_model : typing.Optional[VideoBotsStreamCreateRequestSelectedModel]
document_model : typing.Optional[str]
When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
@@ -165,7 +165,7 @@ def video_bots_stream_create(
keyword_instructions : typing.Optional[str]
- documents : typing.Optional[typing.Sequence[str]]
+ documents : typing.Optional[typing.List[str]]
max_references : typing.Optional[int]
@@ -173,7 +173,7 @@ def video_bots_stream_create(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[CreateStreamRequestEmbeddingModel]
+ embedding_model : typing.Optional[VideoBotsStreamCreateRequestEmbeddingModel]
dense_weight : typing.Optional[float]
@@ -181,17 +181,17 @@ def video_bots_stream_create(
Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- citation_style : typing.Optional[CreateStreamRequestCitationStyle]
+ citation_style : typing.Optional[VideoBotsStreamCreateRequestCitationStyle]
use_url_shortener : typing.Optional[bool]
- asr_model : typing.Optional[CreateStreamRequestAsrModel]
+ asr_model : typing.Optional[VideoBotsStreamCreateRequestAsrModel]
Choose a model to transcribe incoming audio messages to text.
asr_language : typing.Optional[str]
Choose a language to transcribe incoming audio messages to text.
- translation_model : typing.Optional[CreateStreamRequestTranslationModel]
+ translation_model : typing.Optional[VideoBotsStreamCreateRequestTranslationModel]
user_language : typing.Optional[str]
Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -206,9 +206,9 @@ def video_bots_stream_create(
Translation Glossary for LLM Language (English) -> User Langauge
- lipsync_model : typing.Optional[CreateStreamRequestLipsyncModel]
+ lipsync_model : typing.Optional[VideoBotsStreamCreateRequestLipsyncModel]
- tools : typing.Optional[typing.Sequence[LlmTools]]
+ tools : typing.Optional[typing.List[LlmTools]]
Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
avoid_repetition : typing.Optional[bool]
@@ -221,9 +221,9 @@ def video_bots_stream_create(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[CreateStreamRequestResponseFormatType]
+ response_format_type : typing.Optional[VideoBotsStreamCreateRequestResponseFormatType]
- tts_provider : typing.Optional[CreateStreamRequestTtsProvider]
+ tts_provider : typing.Optional[VideoBotsStreamCreateRequestTtsProvider]
uberduck_voice_name : typing.Optional[str]
@@ -256,9 +256,9 @@ def video_bots_stream_create(
azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[CreateStreamRequestOpenaiVoiceName]
+ openai_voice_name : typing.Optional[VideoBotsStreamCreateRequestOpenaiVoiceName]
- openai_tts_model : typing.Optional[CreateStreamRequestOpenaiTtsModel]
+ openai_tts_model : typing.Optional[VideoBotsStreamCreateRequestOpenaiTtsModel]
input_face : typing.Optional[str]
@@ -297,7 +297,7 @@ def video_bots_stream_create(
_response = self._client_wrapper.httpx_client.request(
"v3/integrations/stream",
method="POST",
- json={
+ data={
"integration_id": integration_id,
"conversation_id": conversation_id,
"user_id": user_id,
@@ -365,6 +365,7 @@ def video_bots_stream_create(
"sadtalker_settings": sadtalker_settings,
"input_text": input_text,
},
+ files={},
request_options=request_options,
omit=OMIT,
)
@@ -439,71 +440,71 @@ async def video_bots_stream_create(
self,
*,
integration_id: str,
- conversation_id: typing.Optional[str] = OMIT,
- user_id: typing.Optional[str] = OMIT,
- user_message_id: typing.Optional[str] = OMIT,
- button_pressed: typing.Optional[ButtonPressed] = OMIT,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- input_prompt: typing.Optional[str] = OMIT,
- input_audio: typing.Optional[str] = OMIT,
- input_images: typing.Optional[typing.Sequence[str]] = OMIT,
- input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
- doc_extract_url: typing.Optional[str] = OMIT,
- messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
- bot_script: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[CreateStreamRequestSelectedModel] = OMIT,
- document_model: typing.Optional[str] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- keyword_instructions: typing.Optional[str] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- citation_style: typing.Optional[CreateStreamRequestCitationStyle] = OMIT,
- use_url_shortener: typing.Optional[bool] = OMIT,
- asr_model: typing.Optional[CreateStreamRequestAsrModel] = OMIT,
- asr_language: typing.Optional[str] = OMIT,
- translation_model: typing.Optional[CreateStreamRequestTranslationModel] = OMIT,
- user_language: typing.Optional[str] = OMIT,
- input_glossary_document: typing.Optional[str] = OMIT,
- output_glossary_document: typing.Optional[str] = OMIT,
- lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = OMIT,
- tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = OMIT,
- tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[CreateStreamRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[CreateStreamRequestOpenaiTtsModel] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- input_text: typing.Optional[str] = OMIT,
+ conversation_id: typing.Optional[str] = None,
+ user_id: typing.Optional[str] = None,
+ user_message_id: typing.Optional[str] = None,
+ button_pressed: typing.Optional[ButtonPressed] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_prompt: typing.Optional[str] = None,
+ input_audio: typing.Optional[str] = None,
+ input_images: typing.Optional[typing.List[str]] = None,
+ input_documents: typing.Optional[typing.List[str]] = None,
+ doc_extract_url: typing.Optional[str] = None,
+ messages: typing.Optional[typing.List[ConversationEntry]] = None,
+ bot_script: typing.Optional[str] = None,
+ selected_model: typing.Optional[VideoBotsStreamCreateRequestSelectedModel] = None,
+ document_model: typing.Optional[str] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ keyword_instructions: typing.Optional[str] = None,
+ documents: typing.Optional[typing.List[str]] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ embedding_model: typing.Optional[VideoBotsStreamCreateRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ citation_style: typing.Optional[VideoBotsStreamCreateRequestCitationStyle] = None,
+ use_url_shortener: typing.Optional[bool] = None,
+ asr_model: typing.Optional[VideoBotsStreamCreateRequestAsrModel] = None,
+ asr_language: typing.Optional[str] = None,
+ translation_model: typing.Optional[VideoBotsStreamCreateRequestTranslationModel] = None,
+ user_language: typing.Optional[str] = None,
+ input_glossary_document: typing.Optional[str] = None,
+ output_glossary_document: typing.Optional[str] = None,
+ lipsync_model: typing.Optional[VideoBotsStreamCreateRequestLipsyncModel] = None,
+ tools: typing.Optional[typing.List[LlmTools]] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[VideoBotsStreamCreateRequestResponseFormatType] = None,
+ tts_provider: typing.Optional[VideoBotsStreamCreateRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[VideoBotsStreamCreateRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[VideoBotsStreamCreateRequestOpenaiTtsModel] = None,
+ input_face: typing.Optional[str] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ input_text: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateStreamResponse:
"""
@@ -532,7 +533,7 @@ async def video_bots_stream_create(
button_pressed : typing.Optional[ButtonPressed]
The button that was pressed by the user.
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Any]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -541,18 +542,18 @@ async def video_bots_stream_create(
input_audio : typing.Optional[str]
- input_images : typing.Optional[typing.Sequence[str]]
+ input_images : typing.Optional[typing.List[str]]
- input_documents : typing.Optional[typing.Sequence[str]]
+ input_documents : typing.Optional[typing.List[str]]
doc_extract_url : typing.Optional[str]
Select a workflow to extract text from documents and images.
- messages : typing.Optional[typing.Sequence[ConversationEntry]]
+ messages : typing.Optional[typing.List[ConversationEntry]]
bot_script : typing.Optional[str]
- selected_model : typing.Optional[CreateStreamRequestSelectedModel]
+ selected_model : typing.Optional[VideoBotsStreamCreateRequestSelectedModel]
document_model : typing.Optional[str]
When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
@@ -563,7 +564,7 @@ async def video_bots_stream_create(
keyword_instructions : typing.Optional[str]
- documents : typing.Optional[typing.Sequence[str]]
+ documents : typing.Optional[typing.List[str]]
max_references : typing.Optional[int]
@@ -571,7 +572,7 @@ async def video_bots_stream_create(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[CreateStreamRequestEmbeddingModel]
+ embedding_model : typing.Optional[VideoBotsStreamCreateRequestEmbeddingModel]
dense_weight : typing.Optional[float]
@@ -579,17 +580,17 @@ async def video_bots_stream_create(
Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- citation_style : typing.Optional[CreateStreamRequestCitationStyle]
+ citation_style : typing.Optional[VideoBotsStreamCreateRequestCitationStyle]
use_url_shortener : typing.Optional[bool]
- asr_model : typing.Optional[CreateStreamRequestAsrModel]
+ asr_model : typing.Optional[VideoBotsStreamCreateRequestAsrModel]
Choose a model to transcribe incoming audio messages to text.
asr_language : typing.Optional[str]
Choose a language to transcribe incoming audio messages to text.
- translation_model : typing.Optional[CreateStreamRequestTranslationModel]
+ translation_model : typing.Optional[VideoBotsStreamCreateRequestTranslationModel]
user_language : typing.Optional[str]
Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -604,9 +605,9 @@ async def video_bots_stream_create(
Translation Glossary for LLM Language (English) -> User Langauge
- lipsync_model : typing.Optional[CreateStreamRequestLipsyncModel]
+ lipsync_model : typing.Optional[VideoBotsStreamCreateRequestLipsyncModel]
- tools : typing.Optional[typing.Sequence[LlmTools]]
+ tools : typing.Optional[typing.List[LlmTools]]
Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
avoid_repetition : typing.Optional[bool]
@@ -619,9 +620,9 @@ async def video_bots_stream_create(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[CreateStreamRequestResponseFormatType]
+ response_format_type : typing.Optional[VideoBotsStreamCreateRequestResponseFormatType]
- tts_provider : typing.Optional[CreateStreamRequestTtsProvider]
+ tts_provider : typing.Optional[VideoBotsStreamCreateRequestTtsProvider]
uberduck_voice_name : typing.Optional[str]
@@ -654,9 +655,9 @@ async def video_bots_stream_create(
azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[CreateStreamRequestOpenaiVoiceName]
+ openai_voice_name : typing.Optional[VideoBotsStreamCreateRequestOpenaiVoiceName]
- openai_tts_model : typing.Optional[CreateStreamRequestOpenaiTtsModel]
+ openai_tts_model : typing.Optional[VideoBotsStreamCreateRequestOpenaiTtsModel]
input_face : typing.Optional[str]
@@ -703,7 +704,7 @@ async def main() -> None:
_response = await self._client_wrapper.httpx_client.request(
"v3/integrations/stream",
method="POST",
- json={
+ data={
"integration_id": integration_id,
"conversation_id": conversation_id,
"user_id": user_id,
@@ -771,6 +772,7 @@ async def main() -> None:
"sadtalker_settings": sadtalker_settings,
"input_text": input_text,
},
+ files={},
request_options=request_options,
omit=OMIT,
)
diff --git a/src/gooey/copilot_integrations/types/__init__.py b/src/gooey/copilot_integrations/types/__init__.py
index 1224051..6e93ace 100644
--- a/src/gooey/copilot_integrations/types/__init__.py
+++ b/src/gooey/copilot_integrations/types/__init__.py
@@ -1,27 +1,27 @@
# This file was auto-generated by Fern from our API Definition.
-from .create_stream_request_asr_model import CreateStreamRequestAsrModel
-from .create_stream_request_citation_style import CreateStreamRequestCitationStyle
-from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel
-from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel
-from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel
-from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName
-from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType
-from .create_stream_request_selected_model import CreateStreamRequestSelectedModel
-from .create_stream_request_translation_model import CreateStreamRequestTranslationModel
-from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider
+from .video_bots_stream_create_request_asr_model import VideoBotsStreamCreateRequestAsrModel
+from .video_bots_stream_create_request_citation_style import VideoBotsStreamCreateRequestCitationStyle
+from .video_bots_stream_create_request_embedding_model import VideoBotsStreamCreateRequestEmbeddingModel
+from .video_bots_stream_create_request_lipsync_model import VideoBotsStreamCreateRequestLipsyncModel
+from .video_bots_stream_create_request_openai_tts_model import VideoBotsStreamCreateRequestOpenaiTtsModel
+from .video_bots_stream_create_request_openai_voice_name import VideoBotsStreamCreateRequestOpenaiVoiceName
+from .video_bots_stream_create_request_response_format_type import VideoBotsStreamCreateRequestResponseFormatType
+from .video_bots_stream_create_request_selected_model import VideoBotsStreamCreateRequestSelectedModel
+from .video_bots_stream_create_request_translation_model import VideoBotsStreamCreateRequestTranslationModel
+from .video_bots_stream_create_request_tts_provider import VideoBotsStreamCreateRequestTtsProvider
from .video_bots_stream_response import VideoBotsStreamResponse
__all__ = [
- "CreateStreamRequestAsrModel",
- "CreateStreamRequestCitationStyle",
- "CreateStreamRequestEmbeddingModel",
- "CreateStreamRequestLipsyncModel",
- "CreateStreamRequestOpenaiTtsModel",
- "CreateStreamRequestOpenaiVoiceName",
- "CreateStreamRequestResponseFormatType",
- "CreateStreamRequestSelectedModel",
- "CreateStreamRequestTranslationModel",
- "CreateStreamRequestTtsProvider",
+ "VideoBotsStreamCreateRequestAsrModel",
+ "VideoBotsStreamCreateRequestCitationStyle",
+ "VideoBotsStreamCreateRequestEmbeddingModel",
+ "VideoBotsStreamCreateRequestLipsyncModel",
+ "VideoBotsStreamCreateRequestOpenaiTtsModel",
+ "VideoBotsStreamCreateRequestOpenaiVoiceName",
+ "VideoBotsStreamCreateRequestResponseFormatType",
+ "VideoBotsStreamCreateRequestSelectedModel",
+ "VideoBotsStreamCreateRequestTranslationModel",
+ "VideoBotsStreamCreateRequestTtsProvider",
"VideoBotsStreamResponse",
]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py b/src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py
deleted file mode 100644
index c207d45..0000000
--- a/src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CreateStreamRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py b/src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py
deleted file mode 100644
index dc5024d..0000000
--- a/src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CreateStreamRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_translation_model.py b/src/gooey/copilot_integrations/types/create_stream_request_translation_model.py
deleted file mode 100644
index 3876937..0000000
--- a/src/gooey/copilot_integrations/types/create_stream_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CreateStreamRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/asr_page_request_selected_model.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_asr_model.py
similarity index 89%
rename from src/gooey/types/asr_page_request_selected_model.py
rename to src/gooey/copilot_integrations/types/video_bots_stream_create_request_asr_model.py
index 4e80d3c..0f2b04b 100644
--- a/src/gooey/types/asr_page_request_selected_model.py
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_asr_model.py
@@ -2,7 +2,7 @@
import typing
-AsrPageRequestSelectedModel = typing.Union[
+VideoBotsStreamCreateRequestAsrModel = typing.Union[
typing.Literal[
"whisper_large_v2",
"whisper_large_v3",
diff --git a/src/gooey/types/video_bots_page_request_citation_style.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_citation_style.py
similarity index 89%
rename from src/gooey/types/video_bots_page_request_citation_style.py
rename to src/gooey/copilot_integrations/types/video_bots_stream_create_request_citation_style.py
index dc3630b..eb80dca 100644
--- a/src/gooey/types/video_bots_page_request_citation_style.py
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_citation_style.py
@@ -2,7 +2,7 @@
import typing
-VideoBotsPageRequestCitationStyle = typing.Union[
+VideoBotsStreamCreateRequestCitationStyle = typing.Union[
typing.Literal[
"number",
"title",
diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_embedding_model.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_embedding_model.py
new file mode 100644
index 0000000..56f2399
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_embedding_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsStreamCreateRequestEmbeddingModel = typing.Union[
+ typing.Literal[
+ "openai_3_large",
+ "openai_3_small",
+ "openai_ada_2",
+ "e5_large_v2",
+ "e5_base_v2",
+ "multilingual_e5_base",
+ "multilingual_e5_large",
+ "gte_large",
+ "gte_base",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_lipsync_model.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_lipsync_model.py
new file mode 100644
index 0000000..a499eec
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_lipsync_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsStreamCreateRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_tts_model.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_tts_model.py
new file mode 100644
index 0000000..05802c9
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_tts_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsStreamCreateRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_page_request_openai_voice_name.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_voice_name.py
similarity index 73%
rename from src/gooey/types/lipsync_tts_page_request_openai_voice_name.py
rename to src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_voice_name.py
index 4873924..60a9be7 100644
--- a/src/gooey/types/lipsync_tts_page_request_openai_voice_name.py
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_voice_name.py
@@ -2,6 +2,6 @@
import typing
-LipsyncTtsPageRequestOpenaiVoiceName = typing.Union[
+VideoBotsStreamCreateRequestOpenaiVoiceName = typing.Union[
typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
]
diff --git a/src/gooey/types/social_lookup_email_page_request_response_format_type.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_response_format_type.py
similarity index 66%
rename from src/gooey/types/social_lookup_email_page_request_response_format_type.py
rename to src/gooey/copilot_integrations/types/video_bots_stream_create_request_response_format_type.py
index 46c50db..fbf245e 100644
--- a/src/gooey/types/social_lookup_email_page_request_response_format_type.py
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_response_format_type.py
@@ -2,4 +2,4 @@
import typing
-SocialLookupEmailPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
+VideoBotsStreamCreateRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_selected_model.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_selected_model.py
new file mode 100644
index 0000000..52c9f20
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_selected_model.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsStreamCreateRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_o_mini",
+ "chatgpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama_3_groq_70b_tool_use",
+ "llama3_8b",
+ "llama_3_groq_8b_tool_use",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_2_9b_it",
+ "gemma_7b_it",
+ "gemini_1_5_flash",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "llama3_8b_cpt_sea_lion_v2_instruct",
+ "sarvam_2b",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_translation_model.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_translation_model.py
new file mode 100644
index 0000000..db21082
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_translation_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsStreamCreateRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/text_to_speech_page_request_tts_provider.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_tts_provider.py
similarity index 76%
rename from src/gooey/types/text_to_speech_page_request_tts_provider.py
rename to src/gooey/copilot_integrations/types/video_bots_stream_create_request_tts_provider.py
index a6b8938..581f80a 100644
--- a/src/gooey/types/text_to_speech_page_request_tts_provider.py
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_tts_provider.py
@@ -2,6 +2,6 @@
import typing
-TextToSpeechPageRequestTtsProvider = typing.Union[
+VideoBotsStreamCreateRequestTtsProvider = typing.Union[
typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
]
diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py
index 6006fe1..7f52b8d 100644
--- a/src/gooey/core/client_wrapper.py
+++ b/src/gooey/core/client_wrapper.py
@@ -23,7 +23,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "gooeyai",
- "X-Fern-SDK-Version": "0.0.1-beta5",
+ "X-Fern-SDK-Version": "0.0.1-beta6",
}
headers["Authorization"] = f"Bearer {self._get_api_key()}"
return headers
diff --git a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py
new file mode 100644
index 0000000..da651fc
--- /dev/null
+++ b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.seo_summary_page_status_response import SeoSummaryPageStatusResponse
+
+
+class CreateAPerfectSeoOptimizedTitleParagraphClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_seo_summary(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> SeoSummaryPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SeoSummaryPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/SEOSummary/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(SeoSummaryPageStatusResponse, parse_obj_as(type_=SeoSummaryPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncCreateAPerfectSeoOptimizedTitleParagraphClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_seo_summary(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> SeoSummaryPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SeoSummaryPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/SEOSummary/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(SeoSummaryPageStatusResponse, parse_obj_as(type_=SeoSummaryPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/edit_an_image_with_ai_prompt/__init__.py b/src/gooey/edit_an_image_with_ai_prompt/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/edit_an_image_with_ai_prompt/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/edit_an_image_with_ai_prompt/client.py b/src/gooey/edit_an_image_with_ai_prompt/client.py
new file mode 100644
index 0000000..c8b36ee
--- /dev/null
+++ b/src/gooey/edit_an_image_with_ai_prompt/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.img2img_page_status_response import Img2ImgPageStatusResponse
+
+
+class EditAnImageWithAiPromptClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_img2img(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> Img2ImgPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Img2ImgPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.edit_an_image_with_ai_prompt.status_img2img(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/Img2Img/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(Img2ImgPageStatusResponse, parse_obj_as(type_=Img2ImgPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncEditAnImageWithAiPromptClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_img2img(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> Img2ImgPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Img2ImgPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.edit_an_image_with_ai_prompt.status_img2img(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/Img2Img/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(Img2ImgPageStatusResponse, parse_obj_as(type_=Img2ImgPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/embeddings/client.py b/src/gooey/embeddings/client.py
index 84f0604..3cbe583 100644
--- a/src/gooey/embeddings/client.py
+++ b/src/gooey/embeddings/client.py
@@ -5,23 +5,35 @@
from ..core.api_error import ApiError
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.embeddings_page_status_response import EmbeddingsPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
class EmbeddingsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
- def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ def status_embeddings(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> EmbeddingsPageStatusResponse:
"""
Parameters
----------
+ run_id : str
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- None
+ EmbeddingsPageStatusResponse
+ Successful Response
Examples
--------
@@ -30,14 +42,28 @@ def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> No
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.embeddings.post()
+ client.embeddings.status_embeddings(
+ run_id="run_id",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
- "v2/embeddings/", method="POST", request_options=request_options
+ "v3/embeddings/status", method="GET", params={"run_id": run_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return
+ return typing.cast(EmbeddingsPageStatusResponse, parse_obj_as(type_=EmbeddingsPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -48,16 +74,21 @@ class AsyncEmbeddingsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
- async def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ async def status_embeddings(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> EmbeddingsPageStatusResponse:
"""
Parameters
----------
+ run_id : str
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- None
+ EmbeddingsPageStatusResponse
+ Successful Response
Examples
--------
@@ -71,17 +102,31 @@ async def post(self, *, request_options: typing.Optional[RequestOptions] = None)
async def main() -> None:
- await client.embeddings.post()
+ await client.embeddings.status_embeddings(
+ run_id="run_id",
+ )
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v2/embeddings/", method="POST", request_options=request_options
+ "v3/embeddings/status", method="GET", params={"run_id": run_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return
+ return typing.cast(EmbeddingsPageStatusResponse, parse_obj_as(type_=EmbeddingsPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/evaluator/__init__.py b/src/gooey/evaluator/__init__.py
index f3ea265..753a030 100644
--- a/src/gooey/evaluator/__init__.py
+++ b/src/gooey/evaluator/__init__.py
@@ -1,2 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import AsyncFormBulkEvalRequestResponseFormatType, AsyncFormBulkEvalRequestSelectedModel
+
+__all__ = ["AsyncFormBulkEvalRequestResponseFormatType", "AsyncFormBulkEvalRequestSelectedModel"]
diff --git a/src/gooey/evaluator/client.py b/src/gooey/evaluator/client.py
index 1e4720f..e2a469c 100644
--- a/src/gooey/evaluator/client.py
+++ b/src/gooey/evaluator/client.py
@@ -12,10 +12,19 @@
from ..errors.payment_required_error import PaymentRequiredError
from ..errors.too_many_requests_error import TooManyRequestsError
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.body_async_form_bulk_eval import BodyAsyncFormBulkEval
+from ..types.agg_function import AggFunction
+from ..types.bulk_eval_page_status_response import BulkEvalPageStatusResponse
+from ..types.eval_prompt import EvalPrompt
from ..types.failed_reponse_model_v2 import FailedReponseModelV2
from ..types.generic_error_response import GenericErrorResponse
from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from .types.async_form_bulk_eval_request_response_format_type import AsyncFormBulkEvalRequestResponseFormatType
+from .types.async_form_bulk_eval_request_selected_model import AsyncFormBulkEvalRequestSelectedModel
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
class EvaluatorClient:
@@ -23,19 +32,74 @@ def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
def async_form_bulk_eval(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormBulkEval:
+ self,
+ *,
+ documents: typing.List[str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ eval_prompts: typing.Optional[typing.List[EvalPrompt]] = None,
+ agg_functions: typing.Optional[typing.List[AggFunction]] = None,
+ selected_model: typing.Optional[AsyncFormBulkEvalRequestSelectedModel] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[AsyncFormBulkEvalRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkEvalPageStatusResponse:
"""
Parameters
----------
+ documents : typing.List[str]
+
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions or for Art QR Code, would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ eval_prompts : typing.Optional[typing.List[EvalPrompt]]
+
+ Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
+ _The `columns` dictionary can be used to reference the spreadsheet columns._
+
+
+ agg_functions : typing.Optional[typing.List[AggFunction]]
+
+ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+
+
+ selected_model : typing.Optional[AsyncFormBulkEvalRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[AsyncFormBulkEvalRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormBulkEval
+ BulkEvalPageStatusResponse
Successful Response
Examples
@@ -45,14 +109,36 @@ def async_form_bulk_eval(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.evaluator.async_form_bulk_eval()
+ client.evaluator.async_form_bulk_eval(
+ documents=["documents"],
+ )
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/bulk-eval/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/bulk-eval/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "eval_prompts": eval_prompts,
+ "agg_functions": agg_functions,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormBulkEval, parse_obj_as(type_=BodyAsyncFormBulkEval, object_=_response.json())) # type: ignore
+ return typing.cast(BulkEvalPageStatusResponse, parse_obj_as(type_=BulkEvalPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -78,25 +164,130 @@ def async_form_bulk_eval(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def status_bulk_eval(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkEvalPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkEvalPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.evaluator.status_bulk_eval(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/bulk-eval/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(BulkEvalPageStatusResponse, parse_obj_as(type_=BulkEvalPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
class AsyncEvaluatorClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
async def async_form_bulk_eval(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormBulkEval:
+ self,
+ *,
+ documents: typing.List[str],
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ eval_prompts: typing.Optional[typing.List[EvalPrompt]] = None,
+ agg_functions: typing.Optional[typing.List[AggFunction]] = None,
+ selected_model: typing.Optional[AsyncFormBulkEvalRequestSelectedModel] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[AsyncFormBulkEvalRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkEvalPageStatusResponse:
"""
Parameters
----------
+ documents : typing.List[str]
+
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions or for Art QR Code, would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ eval_prompts : typing.Optional[typing.List[EvalPrompt]]
+
+ Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
+ _The `columns` dictionary can be used to reference the spreadsheet columns._
+
+
+ agg_functions : typing.Optional[typing.List[AggFunction]]
+
+ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+
+
+ selected_model : typing.Optional[AsyncFormBulkEvalRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[AsyncFormBulkEvalRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormBulkEval
+ BulkEvalPageStatusResponse
Successful Response
Examples
@@ -111,17 +302,39 @@ async def async_form_bulk_eval(
async def main() -> None:
- await client.evaluator.async_form_bulk_eval()
+ await client.evaluator.async_form_bulk_eval(
+ documents=["documents"],
+ )
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/bulk-eval/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/bulk-eval/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "eval_prompts": eval_prompts,
+ "agg_functions": agg_functions,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormBulkEval, parse_obj_as(type_=BodyAsyncFormBulkEval, object_=_response.json())) # type: ignore
+ return typing.cast(BulkEvalPageStatusResponse, parse_obj_as(type_=BulkEvalPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -146,3 +359,61 @@ async def main() -> None:
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_bulk_eval(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkEvalPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkEvalPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.evaluator.status_bulk_eval(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/bulk-eval/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(BulkEvalPageStatusResponse, parse_obj_as(type_=BulkEvalPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/evaluator/types/__init__.py b/src/gooey/evaluator/types/__init__.py
new file mode 100644
index 0000000..5d6d502
--- /dev/null
+++ b/src/gooey/evaluator/types/__init__.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .async_form_bulk_eval_request_response_format_type import AsyncFormBulkEvalRequestResponseFormatType
+from .async_form_bulk_eval_request_selected_model import AsyncFormBulkEvalRequestSelectedModel
+
+__all__ = ["AsyncFormBulkEvalRequestResponseFormatType", "AsyncFormBulkEvalRequestSelectedModel"]
diff --git a/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py b/src/gooey/evaluator/types/async_form_bulk_eval_request_response_format_type.py
similarity index 66%
rename from src/gooey/types/related_qn_a_doc_page_request_response_format_type.py
rename to src/gooey/evaluator/types/async_form_bulk_eval_request_response_format_type.py
index c65a896..a4489a8 100644
--- a/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py
+++ b/src/gooey/evaluator/types/async_form_bulk_eval_request_response_format_type.py
@@ -2,4 +2,4 @@
import typing
-RelatedQnADocPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
+AsyncFormBulkEvalRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/evaluator/types/async_form_bulk_eval_request_selected_model.py b/src/gooey/evaluator/types/async_form_bulk_eval_request_selected_model.py
new file mode 100644
index 0000000..52046e6
--- /dev/null
+++ b/src/gooey/evaluator/types/async_form_bulk_eval_request_selected_model.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsyncFormBulkEvalRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_o_mini",
+ "chatgpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama_3_groq_70b_tool_use",
+ "llama3_8b",
+ "llama_3_groq_8b_tool_use",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_2_9b_it",
+ "gemma_7b_it",
+ "gemini_1_5_flash",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "llama3_8b_cpt_sea_lion_v2_instruct",
+ "sarvam_2b",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/functions/client.py b/src/gooey/functions/client.py
index 79ae564..6daa1ec 100644
--- a/src/gooey/functions/client.py
+++ b/src/gooey/functions/client.py
@@ -12,10 +12,14 @@
from ..errors.payment_required_error import PaymentRequiredError
from ..errors.too_many_requests_error import TooManyRequestsError
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.body_async_form_functions import BodyAsyncFormFunctions
from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.functions_page_status_response import FunctionsPageStatusResponse
from ..types.generic_error_response import GenericErrorResponse
from ..types.http_validation_error import HttpValidationError
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
class FunctionsClient:
@@ -23,19 +27,33 @@ def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
def async_form_functions(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormFunctions:
+ self,
+ *,
+ example_id: typing.Optional[str] = None,
+ code: typing.Optional[str] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> FunctionsPageStatusResponse:
"""
Parameters
----------
example_id : typing.Optional[str]
+ code : typing.Optional[str]
+ The JS code to be executed.
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used in the code
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormFunctions
+ FunctionsPageStatusResponse
Successful Response
Examples
@@ -48,11 +66,17 @@ def async_form_functions(
client.functions.async_form_functions()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/functions/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/functions/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={"code": code, "variables": variables, "settings": settings},
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormFunctions, parse_obj_as(type_=BodyAsyncFormFunctions, object_=_response.json())) # type: ignore
+ return typing.cast(FunctionsPageStatusResponse, parse_obj_as(type_=FunctionsPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -78,16 +102,21 @@ def async_form_functions(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ def status_functions(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> FunctionsPageStatusResponse:
"""
Parameters
----------
+ run_id : str
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- None
+ FunctionsPageStatusResponse
+ Successful Response
Examples
--------
@@ -96,14 +125,28 @@ def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> No
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.functions.post()
+ client.functions.status_functions(
+ run_id="run_id",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
- "v2/functions/", method="POST", request_options=request_options
+ "v3/functions/status", method="GET", params={"run_id": run_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return
+ return typing.cast(FunctionsPageStatusResponse, parse_obj_as(type_=FunctionsPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -115,19 +158,33 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
async def async_form_functions(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormFunctions:
+ self,
+ *,
+ example_id: typing.Optional[str] = None,
+ code: typing.Optional[str] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> FunctionsPageStatusResponse:
"""
Parameters
----------
example_id : typing.Optional[str]
+ code : typing.Optional[str]
+ The JS code to be executed.
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used in the code
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormFunctions
+ FunctionsPageStatusResponse
Successful Response
Examples
@@ -148,11 +205,17 @@ async def main() -> None:
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/functions/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/functions/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={"code": code, "variables": variables, "settings": settings},
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormFunctions, parse_obj_as(type_=BodyAsyncFormFunctions, object_=_response.json())) # type: ignore
+ return typing.cast(FunctionsPageStatusResponse, parse_obj_as(type_=FunctionsPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -178,16 +241,21 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ async def status_functions(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> FunctionsPageStatusResponse:
"""
Parameters
----------
+ run_id : str
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- None
+ FunctionsPageStatusResponse
+ Successful Response
Examples
--------
@@ -201,17 +269,31 @@ async def post(self, *, request_options: typing.Optional[RequestOptions] = None)
async def main() -> None:
- await client.functions.post()
+ await client.functions.status_functions(
+ run_id="run_id",
+ )
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v2/functions/", method="POST", request_options=request_options
+ "v3/functions/status", method="GET", params={"run_id": run_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return
+ return typing.cast(FunctionsPageStatusResponse, parse_obj_as(type_=FunctionsPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/generate_people_also_ask_seo_content/__init__.py b/src/gooey/generate_people_also_ask_seo_content/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/generate_people_also_ask_seo_content/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/generate_people_also_ask_seo_content/client.py b/src/gooey/generate_people_also_ask_seo_content/client.py
new file mode 100644
index 0000000..2db9847
--- /dev/null
+++ b/src/gooey/generate_people_also_ask_seo_content/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.related_qn_a_page_status_response import RelatedQnAPageStatusResponse
+
+
+class GeneratePeopleAlsoAskSeoContentClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_related_qna_maker(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnAPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ RelatedQnAPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.generate_people_also_ask_seo_content.status_related_qna_maker(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/related-qna-maker/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(RelatedQnAPageStatusResponse, parse_obj_as(type_=RelatedQnAPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncGeneratePeopleAlsoAskSeoContentClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_related_qna_maker(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnAPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ RelatedQnAPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.generate_people_also_ask_seo_content.status_related_qna_maker(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/related-qna-maker/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(RelatedQnAPageStatusResponse, parse_obj_as(type_=RelatedQnAPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/generate_product_photo_backgrounds/__init__.py b/src/gooey/generate_product_photo_backgrounds/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/generate_product_photo_backgrounds/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/generate_product_photo_backgrounds/client.py b/src/gooey/generate_product_photo_backgrounds/client.py
new file mode 100644
index 0000000..2869c08
--- /dev/null
+++ b/src/gooey/generate_product_photo_backgrounds/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse
+
+
+class GenerateProductPhotoBackgroundsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_object_inpainting(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ObjectInpaintingPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ObjectInpaintingPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.generate_product_photo_backgrounds.status_object_inpainting(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/ObjectInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(ObjectInpaintingPageStatusResponse, parse_obj_as(type_=ObjectInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncGenerateProductPhotoBackgroundsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_object_inpainting(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ObjectInpaintingPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ObjectInpaintingPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.generate_product_photo_backgrounds.status_object_inpainting(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/ObjectInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(ObjectInpaintingPageStatusResponse, parse_obj_as(type_=ObjectInpaintingPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/large_language_models_gpt3/__init__.py b/src/gooey/large_language_models_gpt3/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/large_language_models_gpt3/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/large_language_models_gpt3/client.py b/src/gooey/large_language_models_gpt3/client.py
new file mode 100644
index 0000000..9c167fd
--- /dev/null
+++ b/src/gooey/large_language_models_gpt3/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.compare_llm_page_status_response import CompareLlmPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
class LargeLanguageModelsGpt3Client:
    """Synchronous client for the CompareLLM (large language models) recipe."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def status_compare_llm(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> CompareLlmPageStatusResponse:
        """Poll the status of a CompareLLM run by its run id.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CompareLlmPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey import Gooey

        client = Gooey(
            api_key="YOUR_API_KEY",
        )
        client.large_language_models_gpt3.status_compare_llm(
            run_id="run_id",
        )
        """
        resp = self._client_wrapper.httpx_client.request(
            "v3/CompareLLM/status",
            method="GET",
            params={"run_id": run_id},
            request_options=request_options,
        )
        try:
            status = resp.status_code
            if 200 <= status < 300:
                parsed = parse_obj_as(type_=CompareLlmPageStatusResponse, object_=resp.json())  # type: ignore
                return typing.cast(CompareLlmPageStatusResponse, parsed)
            if status == 402:
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=resp.json()))  # type: ignore
                )
            if status == 422:
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=resp.json()))  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=resp.json()))  # type: ignore
                )
            # Unhandled status code: report the decoded body through ApiError.
            body = resp.json()
        except JSONDecodeError:
            # Non-JSON body: report the raw text instead.
            raise ApiError(status_code=resp.status_code, body=resp.text)
        raise ApiError(status_code=resp.status_code, body=body)
+
+
class AsyncLargeLanguageModelsGpt3Client:
    """Asynchronous client for the CompareLLM (large language models) recipe."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def status_compare_llm(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> CompareLlmPageStatusResponse:
        """Poll the status of a CompareLLM run by its run id.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CompareLlmPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.large_language_models_gpt3.status_compare_llm(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        resp = await self._client_wrapper.httpx_client.request(
            "v3/CompareLLM/status",
            method="GET",
            params={"run_id": run_id},
            request_options=request_options,
        )
        try:
            status = resp.status_code
            if 200 <= status < 300:
                parsed = parse_obj_as(type_=CompareLlmPageStatusResponse, object_=resp.json())  # type: ignore
                return typing.cast(CompareLlmPageStatusResponse, parsed)
            if status == 402:
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=resp.json()))  # type: ignore
                )
            if status == 422:
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=resp.json()))  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=resp.json()))  # type: ignore
                )
            # Unhandled status code: report the decoded body through ApiError.
            body = resp.json()
        except JSONDecodeError:
            # Non-JSON body: report the raw text instead.
            raise ApiError(status_code=resp.status_code, body=resp.text)
        raise ApiError(status_code=resp.status_code, body=body)
diff --git a/src/gooey/letter_writer/__init__.py b/src/gooey/letter_writer/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/letter_writer/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/letter_writer/client.py b/src/gooey/letter_writer/client.py
new file mode 100644
index 0000000..75b6be2
--- /dev/null
+++ b/src/gooey/letter_writer/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.letter_writer_page_status_response import LetterWriterPageStatusResponse
+
+
class LetterWriterClient:
    """Synchronous client for the LetterWriter recipe."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def status_letter_writer(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> LetterWriterPageStatusResponse:
        """Poll the status of a LetterWriter run by its run id.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LetterWriterPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey import Gooey

        client = Gooey(
            api_key="YOUR_API_KEY",
        )
        client.letter_writer.status_letter_writer(
            run_id="run_id",
        )
        """
        resp = self._client_wrapper.httpx_client.request(
            "v3/LetterWriter/status",
            method="GET",
            params={"run_id": run_id},
            request_options=request_options,
        )
        try:
            status = resp.status_code
            if 200 <= status < 300:
                parsed = parse_obj_as(type_=LetterWriterPageStatusResponse, object_=resp.json())  # type: ignore
                return typing.cast(LetterWriterPageStatusResponse, parsed)
            if status == 402:
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=resp.json()))  # type: ignore
                )
            if status == 422:
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=resp.json()))  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=resp.json()))  # type: ignore
                )
            # Unhandled status code: report the decoded body through ApiError.
            body = resp.json()
        except JSONDecodeError:
            # Non-JSON body: report the raw text instead.
            raise ApiError(status_code=resp.status_code, body=resp.text)
        raise ApiError(status_code=resp.status_code, body=body)
+
+
class AsyncLetterWriterClient:
    """Asynchronous client for the LetterWriter recipe."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def status_letter_writer(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> LetterWriterPageStatusResponse:
        """Poll the status of a LetterWriter run by its run id.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LetterWriterPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.letter_writer.status_letter_writer(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        resp = await self._client_wrapper.httpx_client.request(
            "v3/LetterWriter/status",
            method="GET",
            params={"run_id": run_id},
            request_options=request_options,
        )
        try:
            status = resp.status_code
            if 200 <= status < 300:
                parsed = parse_obj_as(type_=LetterWriterPageStatusResponse, object_=resp.json())  # type: ignore
                return typing.cast(LetterWriterPageStatusResponse, parsed)
            if status == 402:
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=resp.json()))  # type: ignore
                )
            if status == 422:
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=resp.json()))  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=resp.json()))  # type: ignore
                )
            # Unhandled status code: report the decoded body through ApiError.
            body = resp.json()
        except JSONDecodeError:
            # Non-JSON body: report the raw text instead.
            raise ApiError(status_code=resp.status_code, body=resp.text)
        raise ApiError(status_code=resp.status_code, body=body)
diff --git a/src/gooey/lip_syncing/__init__.py b/src/gooey/lip_syncing/__init__.py
index f3ea265..4575c3f 100644
--- a/src/gooey/lip_syncing/__init__.py
+++ b/src/gooey/lip_syncing/__init__.py
@@ -1,2 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import AsyncFormLipsyncRequestSelectedModel
+
+__all__ = ["AsyncFormLipsyncRequestSelectedModel"]
diff --git a/src/gooey/lip_syncing/client.py b/src/gooey/lip_syncing/client.py
index 119f062..292dd9f 100644
--- a/src/gooey/lip_syncing/client.py
+++ b/src/gooey/lip_syncing/client.py
@@ -12,10 +12,17 @@
from ..errors.payment_required_error import PaymentRequiredError
from ..errors.too_many_requests_error import TooManyRequestsError
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.body_async_form_lipsync import BodyAsyncFormLipsync
from ..types.failed_reponse_model_v2 import FailedReponseModelV2
from ..types.generic_error_response import GenericErrorResponse
from ..types.http_validation_error import HttpValidationError
+from ..types.lipsync_page_status_response import LipsyncPageStatusResponse
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.sad_talker_settings import SadTalkerSettings
+from .types.async_form_lipsync_request_selected_model import AsyncFormLipsyncRequestSelectedModel
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
class LipSyncingClient:
@@ -23,19 +30,56 @@ def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
def async_form_lipsync(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormLipsync:
+ self,
+ *,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_face: typing.Optional[str] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ selected_model: typing.Optional[AsyncFormLipsyncRequestSelectedModel] = None,
+ input_audio: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> LipsyncPageStatusResponse:
"""
Parameters
----------
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_face : typing.Optional[str]
+
+ face_padding_top : typing.Optional[int]
+
+ face_padding_bottom : typing.Optional[int]
+
+ face_padding_left : typing.Optional[int]
+
+ face_padding_right : typing.Optional[int]
+
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+ selected_model : typing.Optional[AsyncFormLipsyncRequestSelectedModel]
+
+ input_audio : typing.Optional[str]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormLipsync
+ LipsyncPageStatusResponse
Successful Response
Examples
@@ -48,11 +92,29 @@ def async_form_lipsync(
client.lip_syncing.async_form_lipsync()
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/Lipsync/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/Lipsync/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_face": input_face,
+ "face_padding_top": face_padding_top,
+ "face_padding_bottom": face_padding_bottom,
+ "face_padding_left": face_padding_left,
+ "face_padding_right": face_padding_right,
+ "sadtalker_settings": sadtalker_settings,
+ "selected_model": selected_model,
+ "input_audio": input_audio,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormLipsync, parse_obj_as(type_=BodyAsyncFormLipsync, object_=_response.json())) # type: ignore
+ return typing.cast(LipsyncPageStatusResponse, parse_obj_as(type_=LipsyncPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -78,25 +140,112 @@ def async_form_lipsync(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def status_lipsync(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> LipsyncPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LipsyncPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.lip_syncing.status_lipsync(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/Lipsync/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(LipsyncPageStatusResponse, parse_obj_as(type_=LipsyncPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
class AsyncLipSyncingClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
async def async_form_lipsync(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormLipsync:
+ self,
+ *,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ input_face: typing.Optional[str] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ selected_model: typing.Optional[AsyncFormLipsyncRequestSelectedModel] = None,
+ input_audio: typing.Optional[str] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> LipsyncPageStatusResponse:
"""
Parameters
----------
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_face : typing.Optional[str]
+
+ face_padding_top : typing.Optional[int]
+
+ face_padding_bottom : typing.Optional[int]
+
+ face_padding_left : typing.Optional[int]
+
+ face_padding_right : typing.Optional[int]
+
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+ selected_model : typing.Optional[AsyncFormLipsyncRequestSelectedModel]
+
+ input_audio : typing.Optional[str]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormLipsync
+ LipsyncPageStatusResponse
Successful Response
Examples
@@ -117,11 +266,29 @@ async def main() -> None:
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/Lipsync/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/Lipsync/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_face": input_face,
+ "face_padding_top": face_padding_top,
+ "face_padding_bottom": face_padding_bottom,
+ "face_padding_left": face_padding_left,
+ "face_padding_right": face_padding_right,
+ "sadtalker_settings": sadtalker_settings,
+ "selected_model": selected_model,
+ "input_audio": input_audio,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormLipsync, parse_obj_as(type_=BodyAsyncFormLipsync, object_=_response.json())) # type: ignore
+ return typing.cast(LipsyncPageStatusResponse, parse_obj_as(type_=LipsyncPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -146,3 +313,61 @@ async def main() -> None:
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_lipsync(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> LipsyncPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LipsyncPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.lip_syncing.status_lipsync(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/Lipsync/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(LipsyncPageStatusResponse, parse_obj_as(type_=LipsyncPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/lip_syncing/types/__init__.py b/src/gooey/lip_syncing/types/__init__.py
new file mode 100644
index 0000000..230913e
--- /dev/null
+++ b/src/gooey/lip_syncing/types/__init__.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .async_form_lipsync_request_selected_model import AsyncFormLipsyncRequestSelectedModel
+
+__all__ = ["AsyncFormLipsyncRequestSelectedModel"]
diff --git a/src/gooey/lip_syncing/types/async_form_lipsync_request_selected_model.py b/src/gooey/lip_syncing/types/async_form_lipsync_request_selected_model.py
new file mode 100644
index 0000000..4aeb464
--- /dev/null
+++ b/src/gooey/lip_syncing/types/async_form_lipsync_request_selected_model.py
@@ -0,0 +1,5 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# Known lipsync model identifiers accepted by the async-form endpoint.
# typing.Any is included in the union, presumably so values outside the
# literal set are still accepted — TODO confirm against the API definition.
AsyncFormLipsyncRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/lipsync_video_with_any_text/__init__.py b/src/gooey/lipsync_video_with_any_text/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/lipsync_video_with_any_text/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/lipsync_video_with_any_text/client.py b/src/gooey/lipsync_video_with_any_text/client.py
new file mode 100644
index 0000000..ccfe50a
--- /dev/null
+++ b/src/gooey/lipsync_video_with_any_text/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse
+
+
class LipsyncVideoWithAnyTextClient:
    """Synchronous client for the LipsyncTTS recipe."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def status_lipsync_tts(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> LipsyncTtsPageStatusResponse:
        """Poll the status of a LipsyncTTS run by its run id.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LipsyncTtsPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey import Gooey

        client = Gooey(
            api_key="YOUR_API_KEY",
        )
        client.lipsync_video_with_any_text.status_lipsync_tts(
            run_id="run_id",
        )
        """
        resp = self._client_wrapper.httpx_client.request(
            "v3/LipsyncTTS/status",
            method="GET",
            params={"run_id": run_id},
            request_options=request_options,
        )
        try:
            status = resp.status_code
            if 200 <= status < 300:
                parsed = parse_obj_as(type_=LipsyncTtsPageStatusResponse, object_=resp.json())  # type: ignore
                return typing.cast(LipsyncTtsPageStatusResponse, parsed)
            if status == 402:
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=resp.json()))  # type: ignore
                )
            if status == 422:
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=resp.json()))  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=resp.json()))  # type: ignore
                )
            # Unhandled status code: report the decoded body through ApiError.
            body = resp.json()
        except JSONDecodeError:
            # Non-JSON body: report the raw text instead.
            raise ApiError(status_code=resp.status_code, body=resp.text)
        raise ApiError(status_code=resp.status_code, body=body)
+
+
class AsyncLipsyncVideoWithAnyTextClient:
    """Asynchronous client for the LipsyncTTS recipe."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def status_lipsync_tts(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> LipsyncTtsPageStatusResponse:
        """Poll the status of a LipsyncTTS run by its run id.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LipsyncTtsPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.lipsync_video_with_any_text.status_lipsync_tts(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        resp = await self._client_wrapper.httpx_client.request(
            "v3/LipsyncTTS/status",
            method="GET",
            params={"run_id": run_id},
            request_options=request_options,
        )
        try:
            status = resp.status_code
            if 200 <= status < 300:
                parsed = parse_obj_as(type_=LipsyncTtsPageStatusResponse, object_=resp.json())  # type: ignore
                return typing.cast(LipsyncTtsPageStatusResponse, parsed)
            if status == 402:
                raise PaymentRequiredError(
                    typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=resp.json()))  # type: ignore
                )
            if status == 422:
                raise UnprocessableEntityError(
                    typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=resp.json()))  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=resp.json()))  # type: ignore
                )
            # Unhandled status code: report the decoded body through ApiError.
            body = resp.json()
        except JSONDecodeError:
            # Non-JSON body: report the raw text instead.
            raise ApiError(status_code=resp.status_code, body=resp.text)
        raise ApiError(status_code=resp.status_code, body=body)
diff --git a/src/gooey/misc/client.py b/src/gooey/misc/client.py
index 39675bf..bdf585b 100644
--- a/src/gooey/misc/client.py
+++ b/src/gooey/misc/client.py
@@ -59,11 +59,11 @@ def video_bots_broadcast(
text: str,
example_id: typing.Optional[str] = None,
run_id: typing.Optional[str] = None,
- audio: typing.Optional[str] = OMIT,
- video: typing.Optional[str] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- buttons: typing.Optional[typing.Sequence[ReplyButton]] = OMIT,
- filters: typing.Optional[BotBroadcastFilters] = OMIT,
+ audio: typing.Optional[str] = None,
+ video: typing.Optional[str] = None,
+ documents: typing.Optional[typing.List[str]] = None,
+ buttons: typing.Optional[typing.List[ReplyButton]] = None,
+ filters: typing.Optional[BotBroadcastFilters] = None,
request_options: typing.Optional[RequestOptions] = None
) -> typing.Any:
"""
@@ -82,10 +82,10 @@ def video_bots_broadcast(
video : typing.Optional[str]
Video URL to send to all users
- documents : typing.Optional[typing.Sequence[str]]
+ documents : typing.Optional[typing.List[str]]
Video URL to send to all users
- buttons : typing.Optional[typing.Sequence[ReplyButton]]
+ buttons : typing.Optional[typing.List[ReplyButton]]
Buttons to send to all users
filters : typing.Optional[BotBroadcastFilters]
@@ -114,7 +114,7 @@ def video_bots_broadcast(
"v2/video-bots/broadcast/send/",
method="POST",
params={"example_id": example_id, "run_id": run_id},
- json={
+ data={
"text": text,
"audio": audio,
"video": video,
@@ -122,6 +122,7 @@ def video_bots_broadcast(
"buttons": buttons,
"filters": filters,
},
+ files={},
request_options=request_options,
omit=OMIT,
)
@@ -188,11 +189,11 @@ async def video_bots_broadcast(
text: str,
example_id: typing.Optional[str] = None,
run_id: typing.Optional[str] = None,
- audio: typing.Optional[str] = OMIT,
- video: typing.Optional[str] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- buttons: typing.Optional[typing.Sequence[ReplyButton]] = OMIT,
- filters: typing.Optional[BotBroadcastFilters] = OMIT,
+ audio: typing.Optional[str] = None,
+ video: typing.Optional[str] = None,
+ documents: typing.Optional[typing.List[str]] = None,
+ buttons: typing.Optional[typing.List[ReplyButton]] = None,
+ filters: typing.Optional[BotBroadcastFilters] = None,
request_options: typing.Optional[RequestOptions] = None
) -> typing.Any:
"""
@@ -211,10 +212,10 @@ async def video_bots_broadcast(
video : typing.Optional[str]
Video URL to send to all users
- documents : typing.Optional[typing.Sequence[str]]
+ documents : typing.Optional[typing.List[str]]
Video URL to send to all users
- buttons : typing.Optional[typing.Sequence[ReplyButton]]
+ buttons : typing.Optional[typing.List[ReplyButton]]
Buttons to send to all users
filters : typing.Optional[BotBroadcastFilters]
@@ -251,7 +252,7 @@ async def main() -> None:
"v2/video-bots/broadcast/send/",
method="POST",
params={"example_id": example_id, "run_id": run_id},
- json={
+ data={
"text": text,
"audio": audio,
"video": video,
@@ -259,6 +260,7 @@ async def main() -> None:
"buttons": buttons,
"filters": filters,
},
+ files={},
request_options=request_options,
omit=OMIT,
)
diff --git a/src/gooey/people_also_ask_answers_from_a_doc/__init__.py b/src/gooey/people_also_ask_answers_from_a_doc/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/people_also_ask_answers_from_a_doc/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/people_also_ask_answers_from_a_doc/client.py b/src/gooey/people_also_ask_answers_from_a_doc/client.py
new file mode 100644
index 0000000..f313451
--- /dev/null
+++ b/src/gooey/people_also_ask_answers_from_a_doc/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse
+
+
+class PeopleAlsoAskAnswersFromADocClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_related_qna_maker_doc(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnADocPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ RelatedQnADocPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/related-qna-maker-doc/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(RelatedQnADocPageStatusResponse, parse_obj_as(type_=RelatedQnADocPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncPeopleAlsoAskAnswersFromADocClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_related_qna_maker_doc(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> RelatedQnADocPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ RelatedQnADocPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/related-qna-maker-doc/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(RelatedQnADocPageStatusResponse, parse_obj_as(type_=RelatedQnADocPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py
new file mode 100644
index 0000000..e650727
--- /dev/null
+++ b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse
+
+
+class ProfileLookupGpt3ForAiPersonalizedEmailsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_social_lookup_email(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> SocialLookupEmailPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SocialLookupEmailPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/SocialLookupEmail/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(SocialLookupEmailPageStatusResponse, parse_obj_as(type_=SocialLookupEmailPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_social_lookup_email(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> SocialLookupEmailPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SocialLookupEmailPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/SocialLookupEmail/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(SocialLookupEmailPageStatusResponse, parse_obj_as(type_=SocialLookupEmailPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/render_image_search_results_with_ai/__init__.py b/src/gooey/render_image_search_results_with_ai/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/render_image_search_results_with_ai/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/render_image_search_results_with_ai/client.py b/src/gooey/render_image_search_results_with_ai/client.py
new file mode 100644
index 0000000..8c75b75
--- /dev/null
+++ b/src/gooey/render_image_search_results_with_ai/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.google_image_gen_page_status_response import GoogleImageGenPageStatusResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class RenderImageSearchResultsWithAiClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_google_image_gen(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleImageGenPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GoogleImageGenPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.render_image_search_results_with_ai.status_google_image_gen(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/GoogleImageGen/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(GoogleImageGenPageStatusResponse, parse_obj_as(type_=GoogleImageGenPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRenderImageSearchResultsWithAiClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_google_image_gen(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleImageGenPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GoogleImageGenPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.render_image_search_results_with_ai.status_google_image_gen(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/GoogleImageGen/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(GoogleImageGenPageStatusResponse, parse_obj_as(type_=GoogleImageGenPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/search_your_docs_with_gpt/__init__.py b/src/gooey/search_your_docs_with_gpt/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/search_your_docs_with_gpt/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/search_your_docs_with_gpt/client.py b/src/gooey/search_your_docs_with_gpt/client.py
new file mode 100644
index 0000000..29abb71
--- /dev/null
+++ b/src/gooey/search_your_docs_with_gpt/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.doc_search_page_status_response import DocSearchPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class SearchYourDocsWithGptClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_doc_search(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSearchPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DocSearchPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.search_your_docs_with_gpt.status_doc_search(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/doc-search/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DocSearchPageStatusResponse, parse_obj_as(type_=DocSearchPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncSearchYourDocsWithGptClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_doc_search(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSearchPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DocSearchPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.search_your_docs_with_gpt.status_doc_search(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/doc-search/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DocSearchPageStatusResponse, parse_obj_as(type_=DocSearchPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/smart_gpt/__init__.py b/src/gooey/smart_gpt/__init__.py
index f3ea265..daee63a 100644
--- a/src/gooey/smart_gpt/__init__.py
+++ b/src/gooey/smart_gpt/__init__.py
@@ -1,2 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import AsyncFormSmartGptRequestResponseFormatType, AsyncFormSmartGptRequestSelectedModel
+
+__all__ = ["AsyncFormSmartGptRequestResponseFormatType", "AsyncFormSmartGptRequestSelectedModel"]
diff --git a/src/gooey/smart_gpt/client.py b/src/gooey/smart_gpt/client.py
index d06562d..b7a2425 100644
--- a/src/gooey/smart_gpt/client.py
+++ b/src/gooey/smart_gpt/client.py
@@ -12,10 +12,17 @@
from ..errors.payment_required_error import PaymentRequiredError
from ..errors.too_many_requests_error import TooManyRequestsError
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.body_async_form_smart_gpt import BodyAsyncFormSmartGpt
from ..types.failed_reponse_model_v2 import FailedReponseModelV2
from ..types.generic_error_response import GenericErrorResponse
from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.smart_gpt_page_status_response import SmartGptPageStatusResponse
+from .types.async_form_smart_gpt_request_response_format_type import AsyncFormSmartGptRequestResponseFormatType
+from .types.async_form_smart_gpt_request_selected_model import AsyncFormSmartGptRequestSelectedModel
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
class SmartGptClient:
@@ -23,19 +30,65 @@ def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
def async_form_smart_gpt(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormSmartGpt:
+ self,
+ *,
+ input_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ cot_prompt: typing.Optional[str] = None,
+ reflexion_prompt: typing.Optional[str] = None,
+ dera_prompt: typing.Optional[str] = None,
+ selected_model: typing.Optional[AsyncFormSmartGptRequestSelectedModel] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[AsyncFormSmartGptRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> SmartGptPageStatusResponse:
"""
Parameters
----------
+ input_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ cot_prompt : typing.Optional[str]
+
+ reflexion_prompt : typing.Optional[str]
+
+ dera_prompt : typing.Optional[str]
+
+ selected_model : typing.Optional[AsyncFormSmartGptRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[AsyncFormSmartGptRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormSmartGpt
+ SmartGptPageStatusResponse
Successful Response
Examples
@@ -45,14 +98,37 @@ def async_form_smart_gpt(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.smart_gpt.async_form_smart_gpt()
+ client.smart_gpt.async_form_smart_gpt(
+ input_prompt="input_prompt",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
- "v3/SmartGPT/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/SmartGPT/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "cot_prompt": cot_prompt,
+ "reflexion_prompt": reflexion_prompt,
+ "dera_prompt": dera_prompt,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormSmartGpt, parse_obj_as(type_=BodyAsyncFormSmartGpt, object_=_response.json())) # type: ignore
+ return typing.cast(SmartGptPageStatusResponse, parse_obj_as(type_=SmartGptPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -78,16 +154,21 @@ def async_form_smart_gpt(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ def status_smart_gpt(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> SmartGptPageStatusResponse:
"""
Parameters
----------
+ run_id : str
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- None
+ SmartGptPageStatusResponse
+ Successful Response
Examples
--------
@@ -96,14 +177,28 @@ def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> No
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.smart_gpt.post()
+ client.smart_gpt.status_smart_gpt(
+ run_id="run_id",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
- "v2/SmartGPT/", method="POST", request_options=request_options
+ "v3/SmartGPT/status", method="GET", params={"run_id": run_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return
+ return typing.cast(SmartGptPageStatusResponse, parse_obj_as(type_=SmartGptPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -115,19 +210,65 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
async def async_form_smart_gpt(
- self, *, example_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
- ) -> BodyAsyncFormSmartGpt:
+ self,
+ *,
+ input_prompt: str,
+ example_id: typing.Optional[str] = None,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ cot_prompt: typing.Optional[str] = None,
+ reflexion_prompt: typing.Optional[str] = None,
+ dera_prompt: typing.Optional[str] = None,
+ selected_model: typing.Optional[AsyncFormSmartGptRequestSelectedModel] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[AsyncFormSmartGptRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> SmartGptPageStatusResponse:
"""
Parameters
----------
+ input_prompt : str
+
example_id : typing.Optional[str]
+ functions : typing.Optional[typing.List[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ cot_prompt : typing.Optional[str]
+
+ reflexion_prompt : typing.Optional[str]
+
+ dera_prompt : typing.Optional[str]
+
+ selected_model : typing.Optional[AsyncFormSmartGptRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[AsyncFormSmartGptRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- BodyAsyncFormSmartGpt
+ SmartGptPageStatusResponse
Successful Response
Examples
@@ -142,17 +283,40 @@ async def async_form_smart_gpt(
async def main() -> None:
- await client.smart_gpt.async_form_smart_gpt()
+ await client.smart_gpt.async_form_smart_gpt(
+ input_prompt="input_prompt",
+ )
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v3/SmartGPT/async/form", method="POST", params={"example_id": example_id}, request_options=request_options
+ "v3/SmartGPT/async/form",
+ method="POST",
+ params={"example_id": example_id},
+ data={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "cot_prompt": cot_prompt,
+ "reflexion_prompt": reflexion_prompt,
+ "dera_prompt": dera_prompt,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ files={},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return typing.cast(BodyAsyncFormSmartGpt, parse_obj_as(type_=BodyAsyncFormSmartGpt, object_=_response.json())) # type: ignore
+ return typing.cast(SmartGptPageStatusResponse, parse_obj_as(type_=SmartGptPageStatusResponse, object_=_response.json())) # type: ignore
if _response.status_code == 400:
raise BadRequestError(
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
@@ -178,16 +342,21 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def post(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ async def status_smart_gpt(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> SmartGptPageStatusResponse:
"""
Parameters
----------
+ run_id : str
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- None
+ SmartGptPageStatusResponse
+ Successful Response
Examples
--------
@@ -201,17 +370,31 @@ async def post(self, *, request_options: typing.Optional[RequestOptions] = None)
async def main() -> None:
- await client.smart_gpt.post()
+ await client.smart_gpt.status_smart_gpt(
+ run_id="run_id",
+ )
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "v2/SmartGPT/", method="POST", request_options=request_options
+ "v3/SmartGPT/status", method="GET", params={"run_id": run_id}, request_options=request_options
)
try:
if 200 <= _response.status_code < 300:
- return
+ return typing.cast(SmartGptPageStatusResponse, parse_obj_as(type_=SmartGptPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/gooey/smart_gpt/types/__init__.py b/src/gooey/smart_gpt/types/__init__.py
new file mode 100644
index 0000000..1297bd3
--- /dev/null
+++ b/src/gooey/smart_gpt/types/__init__.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .async_form_smart_gpt_request_response_format_type import AsyncFormSmartGptRequestResponseFormatType
+from .async_form_smart_gpt_request_selected_model import AsyncFormSmartGptRequestSelectedModel
+
+__all__ = ["AsyncFormSmartGptRequestResponseFormatType", "AsyncFormSmartGptRequestSelectedModel"]
diff --git a/src/gooey/smart_gpt/types/async_form_smart_gpt_request_response_format_type.py b/src/gooey/smart_gpt/types/async_form_smart_gpt_request_response_format_type.py
new file mode 100644
index 0000000..4f73056
--- /dev/null
+++ b/src/gooey/smart_gpt/types/async_form_smart_gpt_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsyncFormSmartGptRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/smart_gpt/types/async_form_smart_gpt_request_selected_model.py b/src/gooey/smart_gpt/types/async_form_smart_gpt_request_selected_model.py
new file mode 100644
index 0000000..864c97b
--- /dev/null
+++ b/src/gooey/smart_gpt/types/async_form_smart_gpt_request_selected_model.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsyncFormSmartGptRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_o_mini",
+ "chatgpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama_3_groq_70b_tool_use",
+ "llama3_8b",
+ "llama_3_groq_8b_tool_use",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_2_9b_it",
+ "gemma_7b_it",
+ "gemini_1_5_flash",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "llama3_8b_cpt_sea_lion_v2_instruct",
+ "sarvam_2b",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/speech_recognition_translation/__init__.py b/src/gooey/speech_recognition_translation/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/speech_recognition_translation/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/speech_recognition_translation/client.py b/src/gooey/speech_recognition_translation/client.py
new file mode 100644
index 0000000..3f83966
--- /dev/null
+++ b/src/gooey/speech_recognition_translation/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.asr_page_status_response import AsrPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class SpeechRecognitionTranslationClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_asr(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsrPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsrPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.speech_recognition_translation.status_asr(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/asr/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(AsrPageStatusResponse, parse_obj_as(type_=AsrPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncSpeechRecognitionTranslationClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_asr(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsrPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsrPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.speech_recognition_translation.status_asr(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/asr/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(AsrPageStatusResponse, parse_obj_as(type_=AsrPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/summarize_your_docs_with_gpt/__init__.py b/src/gooey/summarize_your_docs_with_gpt/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/summarize_your_docs_with_gpt/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/summarize_your_docs_with_gpt/client.py b/src/gooey/summarize_your_docs_with_gpt/client.py
new file mode 100644
index 0000000..65f0059
--- /dev/null
+++ b/src/gooey/summarize_your_docs_with_gpt/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.doc_summary_page_status_response import DocSummaryPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class SummarizeYourDocsWithGptClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_doc_summary(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSummaryPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DocSummaryPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.summarize_your_docs_with_gpt.status_doc_summary(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/doc-summary/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DocSummaryPageStatusResponse, parse_obj_as(type_=DocSummaryPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncSummarizeYourDocsWithGptClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_doc_summary(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocSummaryPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DocSummaryPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.summarize_your_docs_with_gpt.status_doc_summary(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/doc-summary/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DocSummaryPageStatusResponse, parse_obj_as(type_=DocSummaryPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py b/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py b/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py
new file mode 100644
index 0000000..3abe7cc
--- /dev/null
+++ b/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.doc_extract_page_status_response import DocExtractPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class SyntheticDataMakerForVideosPdFsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_doc_extract(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocExtractPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DocExtractPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/doc-extract/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DocExtractPageStatusResponse, parse_obj_as(type_=DocExtractPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncSyntheticDataMakerForVideosPdFsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_doc_extract(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> DocExtractPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ DocExtractPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/doc-extract/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(DocExtractPageStatusResponse, parse_obj_as(type_=DocExtractPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/text_guided_audio_generator/__init__.py b/src/gooey/text_guided_audio_generator/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/text_guided_audio_generator/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/text_guided_audio_generator/client.py b/src/gooey/text_guided_audio_generator/client.py
new file mode 100644
index 0000000..d9bd16a
--- /dev/null
+++ b/src/gooey/text_guided_audio_generator/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.text2audio_page_status_response import Text2AudioPageStatusResponse
+
+
+class TextGuidedAudioGeneratorClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_text2audio(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> Text2AudioPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Text2AudioPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.text_guided_audio_generator.status_text2audio(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/text2audio/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(Text2AudioPageStatusResponse, parse_obj_as(type_=Text2AudioPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncTextGuidedAudioGeneratorClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_text2audio(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> Text2AudioPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Text2AudioPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.text_guided_audio_generator.status_text2audio(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/text2audio/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(Text2AudioPageStatusResponse, parse_obj_as(type_=Text2AudioPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py
index 64690f3..e422650 100644
--- a/src/gooey/types/__init__.py
+++ b/src/gooey/types/__init__.py
@@ -4,62 +4,19 @@
from .agg_function_function import AggFunctionFunction
from .agg_function_result import AggFunctionResult
from .agg_function_result_function import AggFunctionResultFunction
+from .animate_request_selected_model import AnimateRequestSelectedModel
from .animation_prompt import AnimationPrompt
from .asr_chunk import AsrChunk
from .asr_output_json import AsrOutputJson
from .asr_page_output import AsrPageOutput
from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem
-from .asr_page_request import AsrPageRequest
-from .asr_page_request_output_format import AsrPageRequestOutputFormat
-from .asr_page_request_selected_model import AsrPageRequestSelectedModel
-from .asr_page_request_translation_model import AsrPageRequestTranslationModel
-from .asr_page_response import AsrPageResponse
from .asr_page_status_response import AsrPageStatusResponse
from .async_api_response_model_v3 import AsyncApiResponseModelV3
from .balance_response import BalanceResponse
-from .body_async_form_art_qr_code import BodyAsyncFormArtQrCode
-from .body_async_form_asr import BodyAsyncFormAsr
-from .body_async_form_bulk_eval import BodyAsyncFormBulkEval
-from .body_async_form_bulk_runner import BodyAsyncFormBulkRunner
-from .body_async_form_chyron_plant import BodyAsyncFormChyronPlant
-from .body_async_form_compare_ai_upscalers import BodyAsyncFormCompareAiUpscalers
-from .body_async_form_compare_llm import BodyAsyncFormCompareLlm
-from .body_async_form_compare_text2img import BodyAsyncFormCompareText2Img
-from .body_async_form_deforum_sd import BodyAsyncFormDeforumSd
-from .body_async_form_doc_extract import BodyAsyncFormDocExtract
-from .body_async_form_doc_search import BodyAsyncFormDocSearch
-from .body_async_form_doc_summary import BodyAsyncFormDocSummary
-from .body_async_form_email_face_inpainting import BodyAsyncFormEmailFaceInpainting
-from .body_async_form_embeddings import BodyAsyncFormEmbeddings
-from .body_async_form_face_inpainting import BodyAsyncFormFaceInpainting
-from .body_async_form_functions import BodyAsyncFormFunctions
-from .body_async_form_google_gpt import BodyAsyncFormGoogleGpt
-from .body_async_form_google_image_gen import BodyAsyncFormGoogleImageGen
-from .body_async_form_image_segmentation import BodyAsyncFormImageSegmentation
-from .body_async_form_img2img import BodyAsyncFormImg2Img
-from .body_async_form_letter_writer import BodyAsyncFormLetterWriter
-from .body_async_form_lipsync import BodyAsyncFormLipsync
-from .body_async_form_lipsync_tts import BodyAsyncFormLipsyncTts
-from .body_async_form_object_inpainting import BodyAsyncFormObjectInpainting
-from .body_async_form_related_qna_maker import BodyAsyncFormRelatedQnaMaker
-from .body_async_form_related_qna_maker_doc import BodyAsyncFormRelatedQnaMakerDoc
-from .body_async_form_seo_summary import BodyAsyncFormSeoSummary
-from .body_async_form_smart_gpt import BodyAsyncFormSmartGpt
-from .body_async_form_social_lookup_email import BodyAsyncFormSocialLookupEmail
-from .body_async_form_text2audio import BodyAsyncFormText2Audio
-from .body_async_form_text_to_speech import BodyAsyncFormTextToSpeech
-from .body_async_form_translate import BodyAsyncFormTranslate
-from .body_async_form_video_bots import BodyAsyncFormVideoBots
from .bot_broadcast_filters import BotBroadcastFilters
from .bulk_eval_page_output import BulkEvalPageOutput
-from .bulk_eval_page_request import BulkEvalPageRequest
-from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
-from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel
-from .bulk_eval_page_response import BulkEvalPageResponse
from .bulk_eval_page_status_response import BulkEvalPageStatusResponse
from .bulk_runner_page_output import BulkRunnerPageOutput
-from .bulk_runner_page_request import BulkRunnerPageRequest
-from .bulk_runner_page_response import BulkRunnerPageResponse
from .bulk_runner_page_status_response import BulkRunnerPageStatusResponse
from .button_pressed import ButtonPressed
from .called_function_response import CalledFunctionResponse
@@ -68,24 +25,12 @@
from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
from .chyron_plant_page_output import ChyronPlantPageOutput
from .chyron_plant_page_request import ChyronPlantPageRequest
-from .chyron_plant_page_response import ChyronPlantPageResponse
from .chyron_plant_page_status_response import ChyronPlantPageStatusResponse
from .compare_llm_page_output import CompareLlmPageOutput
-from .compare_llm_page_request import CompareLlmPageRequest
-from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
-from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem
-from .compare_llm_page_response import CompareLlmPageResponse
from .compare_llm_page_status_response import CompareLlmPageStatusResponse
from .compare_text2img_page_output import CompareText2ImgPageOutput
-from .compare_text2img_page_request import CompareText2ImgPageRequest
-from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler
-from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
-from .compare_text2img_page_response import CompareText2ImgPageResponse
from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse
from .compare_upscaler_page_output import CompareUpscalerPageOutput
-from .compare_upscaler_page_request import CompareUpscalerPageRequest
-from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
-from .compare_upscaler_page_response import CompareUpscalerPageResponse
from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse
from .console_logs import ConsoleLogs
from .console_logs_level import ConsoleLogsLevel
@@ -100,144 +45,91 @@
from .conversation_start import ConversationStart
from .create_stream_response import CreateStreamResponse
from .deforum_sd_page_output import DeforumSdPageOutput
-from .deforum_sd_page_request import DeforumSdPageRequest
-from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
-from .deforum_sd_page_response import DeforumSdPageResponse
from .deforum_sd_page_status_response import DeforumSdPageStatusResponse
from .doc_extract_page_output import DocExtractPageOutput
-from .doc_extract_page_request import DocExtractPageRequest
-from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
-from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
-from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel
-from .doc_extract_page_response import DocExtractPageResponse
from .doc_extract_page_status_response import DocExtractPageStatusResponse
from .doc_search_page_output import DocSearchPageOutput
-from .doc_search_page_request import DocSearchPageRequest
-from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
-from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel
-from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
-from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType
-from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel
-from .doc_search_page_response import DocSearchPageResponse
from .doc_search_page_status_response import DocSearchPageStatusResponse
from .doc_summary_page_output import DocSummaryPageOutput
-from .doc_summary_page_request import DocSummaryPageRequest
-from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
-from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
-from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel
-from .doc_summary_page_response import DocSummaryPageResponse
from .doc_summary_page_status_response import DocSummaryPageStatusResponse
+from .doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType
+from .doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel
+from .doc_summary_request_selected_model import DocSummaryRequestSelectedModel
from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
-from .email_face_inpainting_page_request import EmailFaceInpaintingPageRequest
-from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
-from .email_face_inpainting_page_response import EmailFaceInpaintingPageResponse
from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse
+from .embed_request_selected_model import EmbedRequestSelectedModel
from .embeddings_page_output import EmbeddingsPageOutput
-from .embeddings_page_request import EmbeddingsPageRequest
-from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
-from .embeddings_page_response import EmbeddingsPageResponse
from .embeddings_page_status_response import EmbeddingsPageStatusResponse
from .eval_prompt import EvalPrompt
from .face_inpainting_page_output import FaceInpaintingPageOutput
-from .face_inpainting_page_request import FaceInpaintingPageRequest
-from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
-from .face_inpainting_page_response import FaceInpaintingPageResponse
from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse
from .failed_reponse_model_v2 import FailedReponseModelV2
from .failed_response_detail import FailedResponseDetail
from .final_response import FinalResponse
from .functions_page_output import FunctionsPageOutput
-from .functions_page_request import FunctionsPageRequest
-from .functions_page_response import FunctionsPageResponse
from .functions_page_status_response import FunctionsPageStatusResponse
from .generic_error_response import GenericErrorResponse
from .generic_error_response_detail import GenericErrorResponseDetail
from .google_gpt_page_output import GoogleGptPageOutput
-from .google_gpt_page_request import GoogleGptPageRequest
-from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel
-from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType
-from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel
-from .google_gpt_page_response import GoogleGptPageResponse
from .google_gpt_page_status_response import GoogleGptPageStatusResponse
from .google_image_gen_page_output import GoogleImageGenPageOutput
-from .google_image_gen_page_request import GoogleImageGenPageRequest
-from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
-from .google_image_gen_page_response import GoogleImageGenPageResponse
from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse
from .http_validation_error import HttpValidationError
+from .image_from_email_request_selected_model import ImageFromEmailRequestSelectedModel
+from .image_from_web_search_request_selected_model import ImageFromWebSearchRequestSelectedModel
from .image_segmentation_page_output import ImageSegmentationPageOutput
-from .image_segmentation_page_request import ImageSegmentationPageRequest
-from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
-from .image_segmentation_page_response import ImageSegmentationPageResponse
from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse
from .image_url import ImageUrl
from .image_url_detail import ImageUrlDetail
from .img2img_page_output import Img2ImgPageOutput
-from .img2img_page_request import Img2ImgPageRequest
-from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
-from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem
-from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
-from .img2img_page_response import Img2ImgPageResponse
from .img2img_page_status_response import Img2ImgPageStatusResponse
from .letter_writer_page_output import LetterWriterPageOutput
from .letter_writer_page_request import LetterWriterPageRequest
-from .letter_writer_page_response import LetterWriterPageResponse
from .letter_writer_page_status_response import LetterWriterPageStatusResponse
from .lipsync_page_output import LipsyncPageOutput
-from .lipsync_page_request import LipsyncPageRequest
-from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
-from .lipsync_page_response import LipsyncPageResponse
from .lipsync_page_status_response import LipsyncPageStatusResponse
from .lipsync_tts_page_output import LipsyncTtsPageOutput
-from .lipsync_tts_page_request import LipsyncTtsPageRequest
-from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
-from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
-from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
-from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
-from .lipsync_tts_page_response import LipsyncTtsPageResponse
from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse
+from .lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel
+from .lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName
+from .lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel
+from .lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider
+from .llm_request_response_format_type import LlmRequestResponseFormatType
+from .llm_request_selected_models_item import LlmRequestSelectedModelsItem
from .llm_tools import LlmTools
from .message_part import MessagePart
from .object_inpainting_page_output import ObjectInpaintingPageOutput
-from .object_inpainting_page_request import ObjectInpaintingPageRequest
-from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
-from .object_inpainting_page_response import ObjectInpaintingPageResponse
from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse
+from .personalize_email_request_response_format_type import PersonalizeEmailRequestResponseFormatType
+from .personalize_email_request_selected_model import PersonalizeEmailRequestSelectedModel
+from .portrait_request_selected_model import PortraitRequestSelectedModel
+from .product_image_request_selected_model import ProductImageRequestSelectedModel
from .prompt_tree_node import PromptTreeNode
from .prompt_tree_node_prompt import PromptTreeNodePrompt
from .qr_code_generator_page_output import QrCodeGeneratorPageOutput
-from .qr_code_generator_page_request import QrCodeGeneratorPageRequest
-from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
- QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
-)
-from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler
-from .qr_code_generator_page_request_selected_controlnet_model_item import (
- QrCodeGeneratorPageRequestSelectedControlnetModelItem,
-)
-from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
-from .qr_code_generator_page_response import QrCodeGeneratorPageResponse
from .qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse
+from .qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem
+from .qr_code_request_scheduler import QrCodeRequestScheduler
+from .qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem
+from .qr_code_request_selected_model import QrCodeRequestSelectedModel
+from .rag_request_citation_style import RagRequestCitationStyle
+from .rag_request_embedding_model import RagRequestEmbeddingModel
+from .rag_request_keyword_query import RagRequestKeywordQuery
+from .rag_request_response_format_type import RagRequestResponseFormatType
+from .rag_request_selected_model import RagRequestSelectedModel
from .recipe_function import RecipeFunction
from .recipe_function_trigger import RecipeFunctionTrigger
from .recipe_run_state import RecipeRunState
from .related_doc_search_response import RelatedDocSearchResponse
from .related_google_gpt_response import RelatedGoogleGptResponse
from .related_qn_a_doc_page_output import RelatedQnADocPageOutput
-from .related_qn_a_doc_page_request import RelatedQnADocPageRequest
-from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
-from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel
-from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
-from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType
-from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel
-from .related_qn_a_doc_page_response import RelatedQnADocPageResponse
from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse
from .related_qn_a_page_output import RelatedQnAPageOutput
-from .related_qn_a_page_request import RelatedQnAPageRequest
-from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
-from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
-from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel
-from .related_qn_a_page_response import RelatedQnAPageResponse
from .related_qn_a_page_status_response import RelatedQnAPageStatusResponse
+from .remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel
+from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem
+from .remix_image_request_selected_model import RemixImageRequestSelectedModel
+from .remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel
from .reply_button import ReplyButton
from .response_model import ResponseModel
from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery
@@ -248,125 +140,74 @@
from .sad_talker_settings import SadTalkerSettings
from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess
from .search_reference import SearchReference
+from .seo_content_request_response_format_type import SeoContentRequestResponseFormatType
+from .seo_content_request_selected_model import SeoContentRequestSelectedModel
+from .seo_people_also_ask_doc_request_citation_style import SeoPeopleAlsoAskDocRequestCitationStyle
+from .seo_people_also_ask_doc_request_embedding_model import SeoPeopleAlsoAskDocRequestEmbeddingModel
+from .seo_people_also_ask_doc_request_keyword_query import SeoPeopleAlsoAskDocRequestKeywordQuery
+from .seo_people_also_ask_doc_request_response_format_type import SeoPeopleAlsoAskDocRequestResponseFormatType
+from .seo_people_also_ask_doc_request_selected_model import SeoPeopleAlsoAskDocRequestSelectedModel
+from .seo_people_also_ask_request_embedding_model import SeoPeopleAlsoAskRequestEmbeddingModel
+from .seo_people_also_ask_request_response_format_type import SeoPeopleAlsoAskRequestResponseFormatType
+from .seo_people_also_ask_request_selected_model import SeoPeopleAlsoAskRequestSelectedModel
from .seo_summary_page_output import SeoSummaryPageOutput
-from .seo_summary_page_request import SeoSummaryPageRequest
-from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType
-from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel
-from .seo_summary_page_response import SeoSummaryPageResponse
from .seo_summary_page_status_response import SeoSummaryPageStatusResponse
from .serp_search_location import SerpSearchLocation
from .serp_search_type import SerpSearchType
from .smart_gpt_page_output import SmartGptPageOutput
-from .smart_gpt_page_request import SmartGptPageRequest
-from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
-from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
-from .smart_gpt_page_response import SmartGptPageResponse
from .smart_gpt_page_status_response import SmartGptPageStatusResponse
from .social_lookup_email_page_output import SocialLookupEmailPageOutput
-from .social_lookup_email_page_request import SocialLookupEmailPageRequest
-from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
-from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel
-from .social_lookup_email_page_response import SocialLookupEmailPageResponse
from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse
+from .speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat
+from .speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel
+from .speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel
from .stream_error import StreamError
+from .synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType
+from .synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel
+from .synthesize_data_request_selected_model import SynthesizeDataRequestSelectedModel
from .text2audio_page_output import Text2AudioPageOutput
-from .text2audio_page_request import Text2AudioPageRequest
-from .text2audio_page_response import Text2AudioPageResponse
from .text2audio_page_status_response import Text2AudioPageStatusResponse
+from .text_to_image_request_scheduler import TextToImageRequestScheduler
+from .text_to_image_request_selected_models_item import TextToImageRequestSelectedModelsItem
from .text_to_speech_page_output import TextToSpeechPageOutput
-from .text_to_speech_page_request import TextToSpeechPageRequest
-from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
-from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
-from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
-from .text_to_speech_page_response import TextToSpeechPageResponse
from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse
+from .text_to_speech_request_openai_tts_model import TextToSpeechRequestOpenaiTtsModel
+from .text_to_speech_request_openai_voice_name import TextToSpeechRequestOpenaiVoiceName
+from .text_to_speech_request_tts_provider import TextToSpeechRequestTtsProvider
from .training_data_model import TrainingDataModel
+from .translate_request_selected_model import TranslateRequestSelectedModel
from .translation_page_output import TranslationPageOutput
-from .translation_page_request import TranslationPageRequest
-from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
-from .translation_page_response import TranslationPageResponse
from .translation_page_status_response import TranslationPageStatusResponse
+from .upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem
from .validation_error import ValidationError
from .validation_error_loc_item import ValidationErrorLocItem
from .vcard import Vcard
from .video_bots_page_output import VideoBotsPageOutput
from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery
from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt
-from .video_bots_page_request import VideoBotsPageRequest
-from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
-from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
-from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
-from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
-from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
-from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
-from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel
-from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
-from .video_bots_page_response import VideoBotsPageResponse
from .video_bots_page_status_response import VideoBotsPageStatusResponse
+from .web_search_llm_request_embedding_model import WebSearchLlmRequestEmbeddingModel
+from .web_search_llm_request_response_format_type import WebSearchLlmRequestResponseFormatType
+from .web_search_llm_request_selected_model import WebSearchLlmRequestSelectedModel
__all__ = [
"AggFunction",
"AggFunctionFunction",
"AggFunctionResult",
"AggFunctionResultFunction",
+ "AnimateRequestSelectedModel",
"AnimationPrompt",
"AsrChunk",
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
- "AsrPageRequest",
- "AsrPageRequestOutputFormat",
- "AsrPageRequestSelectedModel",
- "AsrPageRequestTranslationModel",
- "AsrPageResponse",
"AsrPageStatusResponse",
"AsyncApiResponseModelV3",
"BalanceResponse",
- "BodyAsyncFormArtQrCode",
- "BodyAsyncFormAsr",
- "BodyAsyncFormBulkEval",
- "BodyAsyncFormBulkRunner",
- "BodyAsyncFormChyronPlant",
- "BodyAsyncFormCompareAiUpscalers",
- "BodyAsyncFormCompareLlm",
- "BodyAsyncFormCompareText2Img",
- "BodyAsyncFormDeforumSd",
- "BodyAsyncFormDocExtract",
- "BodyAsyncFormDocSearch",
- "BodyAsyncFormDocSummary",
- "BodyAsyncFormEmailFaceInpainting",
- "BodyAsyncFormEmbeddings",
- "BodyAsyncFormFaceInpainting",
- "BodyAsyncFormFunctions",
- "BodyAsyncFormGoogleGpt",
- "BodyAsyncFormGoogleImageGen",
- "BodyAsyncFormImageSegmentation",
- "BodyAsyncFormImg2Img",
- "BodyAsyncFormLetterWriter",
- "BodyAsyncFormLipsync",
- "BodyAsyncFormLipsyncTts",
- "BodyAsyncFormObjectInpainting",
- "BodyAsyncFormRelatedQnaMaker",
- "BodyAsyncFormRelatedQnaMakerDoc",
- "BodyAsyncFormSeoSummary",
- "BodyAsyncFormSmartGpt",
- "BodyAsyncFormSocialLookupEmail",
- "BodyAsyncFormText2Audio",
- "BodyAsyncFormTextToSpeech",
- "BodyAsyncFormTranslate",
- "BodyAsyncFormVideoBots",
"BotBroadcastFilters",
"BulkEvalPageOutput",
- "BulkEvalPageRequest",
- "BulkEvalPageRequestResponseFormatType",
- "BulkEvalPageRequestSelectedModel",
- "BulkEvalPageResponse",
"BulkEvalPageStatusResponse",
"BulkRunnerPageOutput",
- "BulkRunnerPageRequest",
- "BulkRunnerPageResponse",
"BulkRunnerPageStatusResponse",
"ButtonPressed",
"CalledFunctionResponse",
@@ -375,24 +216,12 @@
"ChatCompletionContentPartTextParam",
"ChyronPlantPageOutput",
"ChyronPlantPageRequest",
- "ChyronPlantPageResponse",
"ChyronPlantPageStatusResponse",
"CompareLlmPageOutput",
- "CompareLlmPageRequest",
- "CompareLlmPageRequestResponseFormatType",
- "CompareLlmPageRequestSelectedModelsItem",
- "CompareLlmPageResponse",
"CompareLlmPageStatusResponse",
"CompareText2ImgPageOutput",
- "CompareText2ImgPageRequest",
- "CompareText2ImgPageRequestScheduler",
- "CompareText2ImgPageRequestSelectedModelsItem",
- "CompareText2ImgPageResponse",
"CompareText2ImgPageStatusResponse",
"CompareUpscalerPageOutput",
- "CompareUpscalerPageRequest",
- "CompareUpscalerPageRequestSelectedModelsItem",
- "CompareUpscalerPageResponse",
"CompareUpscalerPageStatusResponse",
"ConsoleLogs",
"ConsoleLogsLevel",
@@ -405,140 +234,91 @@
"ConversationStart",
"CreateStreamResponse",
"DeforumSdPageOutput",
- "DeforumSdPageRequest",
- "DeforumSdPageRequestSelectedModel",
- "DeforumSdPageResponse",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
- "DocExtractPageRequest",
- "DocExtractPageRequestResponseFormatType",
- "DocExtractPageRequestSelectedAsrModel",
- "DocExtractPageRequestSelectedModel",
- "DocExtractPageResponse",
"DocExtractPageStatusResponse",
"DocSearchPageOutput",
- "DocSearchPageRequest",
- "DocSearchPageRequestCitationStyle",
- "DocSearchPageRequestEmbeddingModel",
- "DocSearchPageRequestKeywordQuery",
- "DocSearchPageRequestResponseFormatType",
- "DocSearchPageRequestSelectedModel",
- "DocSearchPageResponse",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
- "DocSummaryPageRequest",
- "DocSummaryPageRequestResponseFormatType",
- "DocSummaryPageRequestSelectedAsrModel",
- "DocSummaryPageRequestSelectedModel",
- "DocSummaryPageResponse",
"DocSummaryPageStatusResponse",
+ "DocSummaryRequestResponseFormatType",
+ "DocSummaryRequestSelectedAsrModel",
+ "DocSummaryRequestSelectedModel",
"EmailFaceInpaintingPageOutput",
- "EmailFaceInpaintingPageRequest",
- "EmailFaceInpaintingPageRequestSelectedModel",
- "EmailFaceInpaintingPageResponse",
"EmailFaceInpaintingPageStatusResponse",
+ "EmbedRequestSelectedModel",
"EmbeddingsPageOutput",
- "EmbeddingsPageRequest",
- "EmbeddingsPageRequestSelectedModel",
- "EmbeddingsPageResponse",
"EmbeddingsPageStatusResponse",
"EvalPrompt",
"FaceInpaintingPageOutput",
- "FaceInpaintingPageRequest",
- "FaceInpaintingPageRequestSelectedModel",
- "FaceInpaintingPageResponse",
"FaceInpaintingPageStatusResponse",
"FailedReponseModelV2",
"FailedResponseDetail",
"FinalResponse",
"FunctionsPageOutput",
- "FunctionsPageRequest",
- "FunctionsPageResponse",
"FunctionsPageStatusResponse",
"GenericErrorResponse",
"GenericErrorResponseDetail",
"GoogleGptPageOutput",
- "GoogleGptPageRequest",
- "GoogleGptPageRequestEmbeddingModel",
- "GoogleGptPageRequestResponseFormatType",
- "GoogleGptPageRequestSelectedModel",
- "GoogleGptPageResponse",
"GoogleGptPageStatusResponse",
"GoogleImageGenPageOutput",
- "GoogleImageGenPageRequest",
- "GoogleImageGenPageRequestSelectedModel",
- "GoogleImageGenPageResponse",
"GoogleImageGenPageStatusResponse",
"HttpValidationError",
+ "ImageFromEmailRequestSelectedModel",
+ "ImageFromWebSearchRequestSelectedModel",
"ImageSegmentationPageOutput",
- "ImageSegmentationPageRequest",
- "ImageSegmentationPageRequestSelectedModel",
- "ImageSegmentationPageResponse",
"ImageSegmentationPageStatusResponse",
"ImageUrl",
"ImageUrlDetail",
"Img2ImgPageOutput",
- "Img2ImgPageRequest",
- "Img2ImgPageRequestSelectedControlnetModel",
- "Img2ImgPageRequestSelectedControlnetModelItem",
- "Img2ImgPageRequestSelectedModel",
- "Img2ImgPageResponse",
"Img2ImgPageStatusResponse",
"LetterWriterPageOutput",
"LetterWriterPageRequest",
- "LetterWriterPageResponse",
"LetterWriterPageStatusResponse",
"LipsyncPageOutput",
- "LipsyncPageRequest",
- "LipsyncPageRequestSelectedModel",
- "LipsyncPageResponse",
"LipsyncPageStatusResponse",
"LipsyncTtsPageOutput",
- "LipsyncTtsPageRequest",
- "LipsyncTtsPageRequestOpenaiTtsModel",
- "LipsyncTtsPageRequestOpenaiVoiceName",
- "LipsyncTtsPageRequestSelectedModel",
- "LipsyncTtsPageRequestTtsProvider",
- "LipsyncTtsPageResponse",
"LipsyncTtsPageStatusResponse",
+ "LipsyncTtsRequestOpenaiTtsModel",
+ "LipsyncTtsRequestOpenaiVoiceName",
+ "LipsyncTtsRequestSelectedModel",
+ "LipsyncTtsRequestTtsProvider",
+ "LlmRequestResponseFormatType",
+ "LlmRequestSelectedModelsItem",
"LlmTools",
"MessagePart",
"ObjectInpaintingPageOutput",
- "ObjectInpaintingPageRequest",
- "ObjectInpaintingPageRequestSelectedModel",
- "ObjectInpaintingPageResponse",
"ObjectInpaintingPageStatusResponse",
+ "PersonalizeEmailRequestResponseFormatType",
+ "PersonalizeEmailRequestSelectedModel",
+ "PortraitRequestSelectedModel",
+ "ProductImageRequestSelectedModel",
"PromptTreeNode",
"PromptTreeNodePrompt",
"QrCodeGeneratorPageOutput",
- "QrCodeGeneratorPageRequest",
- "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
- "QrCodeGeneratorPageRequestScheduler",
- "QrCodeGeneratorPageRequestSelectedControlnetModelItem",
- "QrCodeGeneratorPageRequestSelectedModel",
- "QrCodeGeneratorPageResponse",
"QrCodeGeneratorPageStatusResponse",
+ "QrCodeRequestImagePromptControlnetModelsItem",
+ "QrCodeRequestScheduler",
+ "QrCodeRequestSelectedControlnetModelItem",
+ "QrCodeRequestSelectedModel",
+ "RagRequestCitationStyle",
+ "RagRequestEmbeddingModel",
+ "RagRequestKeywordQuery",
+ "RagRequestResponseFormatType",
+ "RagRequestSelectedModel",
"RecipeFunction",
"RecipeFunctionTrigger",
"RecipeRunState",
"RelatedDocSearchResponse",
"RelatedGoogleGptResponse",
"RelatedQnADocPageOutput",
- "RelatedQnADocPageRequest",
- "RelatedQnADocPageRequestCitationStyle",
- "RelatedQnADocPageRequestEmbeddingModel",
- "RelatedQnADocPageRequestKeywordQuery",
- "RelatedQnADocPageRequestResponseFormatType",
- "RelatedQnADocPageRequestSelectedModel",
- "RelatedQnADocPageResponse",
"RelatedQnADocPageStatusResponse",
"RelatedQnAPageOutput",
- "RelatedQnAPageRequest",
- "RelatedQnAPageRequestEmbeddingModel",
- "RelatedQnAPageRequestResponseFormatType",
- "RelatedQnAPageRequestSelectedModel",
- "RelatedQnAPageResponse",
"RelatedQnAPageStatusResponse",
+ "RemixImageRequestSelectedControlnetModel",
+ "RemixImageRequestSelectedControlnetModelItem",
+ "RemixImageRequestSelectedModel",
+ "RemoveBackgroundRequestSelectedModel",
"ReplyButton",
"ResponseModel",
"ResponseModelFinalKeywordQuery",
@@ -549,61 +329,53 @@
"SadTalkerSettings",
"SadTalkerSettingsPreprocess",
"SearchReference",
+ "SeoContentRequestResponseFormatType",
+ "SeoContentRequestSelectedModel",
+ "SeoPeopleAlsoAskDocRequestCitationStyle",
+ "SeoPeopleAlsoAskDocRequestEmbeddingModel",
+ "SeoPeopleAlsoAskDocRequestKeywordQuery",
+ "SeoPeopleAlsoAskDocRequestResponseFormatType",
+ "SeoPeopleAlsoAskDocRequestSelectedModel",
+ "SeoPeopleAlsoAskRequestEmbeddingModel",
+ "SeoPeopleAlsoAskRequestResponseFormatType",
+ "SeoPeopleAlsoAskRequestSelectedModel",
"SeoSummaryPageOutput",
- "SeoSummaryPageRequest",
- "SeoSummaryPageRequestResponseFormatType",
- "SeoSummaryPageRequestSelectedModel",
- "SeoSummaryPageResponse",
"SeoSummaryPageStatusResponse",
"SerpSearchLocation",
"SerpSearchType",
"SmartGptPageOutput",
- "SmartGptPageRequest",
- "SmartGptPageRequestResponseFormatType",
- "SmartGptPageRequestSelectedModel",
- "SmartGptPageResponse",
"SmartGptPageStatusResponse",
"SocialLookupEmailPageOutput",
- "SocialLookupEmailPageRequest",
- "SocialLookupEmailPageRequestResponseFormatType",
- "SocialLookupEmailPageRequestSelectedModel",
- "SocialLookupEmailPageResponse",
"SocialLookupEmailPageStatusResponse",
+ "SpeechRecognitionRequestOutputFormat",
+ "SpeechRecognitionRequestSelectedModel",
+ "SpeechRecognitionRequestTranslationModel",
"StreamError",
+ "SynthesizeDataRequestResponseFormatType",
+ "SynthesizeDataRequestSelectedAsrModel",
+ "SynthesizeDataRequestSelectedModel",
"Text2AudioPageOutput",
- "Text2AudioPageRequest",
- "Text2AudioPageResponse",
"Text2AudioPageStatusResponse",
+ "TextToImageRequestScheduler",
+ "TextToImageRequestSelectedModelsItem",
"TextToSpeechPageOutput",
- "TextToSpeechPageRequest",
- "TextToSpeechPageRequestOpenaiTtsModel",
- "TextToSpeechPageRequestOpenaiVoiceName",
- "TextToSpeechPageRequestTtsProvider",
- "TextToSpeechPageResponse",
"TextToSpeechPageStatusResponse",
+ "TextToSpeechRequestOpenaiTtsModel",
+ "TextToSpeechRequestOpenaiVoiceName",
+ "TextToSpeechRequestTtsProvider",
"TrainingDataModel",
+ "TranslateRequestSelectedModel",
"TranslationPageOutput",
- "TranslationPageRequest",
- "TranslationPageRequestSelectedModel",
- "TranslationPageResponse",
"TranslationPageStatusResponse",
+ "UpscaleRequestSelectedModelsItem",
"ValidationError",
"ValidationErrorLocItem",
"Vcard",
"VideoBotsPageOutput",
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
- "VideoBotsPageRequest",
- "VideoBotsPageRequestAsrModel",
- "VideoBotsPageRequestCitationStyle",
- "VideoBotsPageRequestEmbeddingModel",
- "VideoBotsPageRequestLipsyncModel",
- "VideoBotsPageRequestOpenaiTtsModel",
- "VideoBotsPageRequestOpenaiVoiceName",
- "VideoBotsPageRequestResponseFormatType",
- "VideoBotsPageRequestSelectedModel",
- "VideoBotsPageRequestTranslationModel",
- "VideoBotsPageRequestTtsProvider",
- "VideoBotsPageResponse",
"VideoBotsPageStatusResponse",
+ "WebSearchLlmRequestEmbeddingModel",
+ "WebSearchLlmRequestResponseFormatType",
+ "WebSearchLlmRequestSelectedModel",
]
diff --git a/src/gooey/types/animate_request_selected_model.py b/src/gooey/types/animate_request_selected_model.py
new file mode 100644
index 0000000..d8ab4b0
--- /dev/null
+++ b/src/gooey/types/animate_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AnimateRequestSelectedModel = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any]
diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py
deleted file mode 100644
index 228b6ff..0000000
--- a/src/gooey/types/asr_page_request.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .asr_page_request_output_format import AsrPageRequestOutputFormat
-from .asr_page_request_selected_model import AsrPageRequestSelectedModel
-from .asr_page_request_translation_model import AsrPageRequestTranslationModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class AsrPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- documents: typing.List[str]
- selected_model: typing.Optional[AsrPageRequestSelectedModel] = None
- language: typing.Optional[str] = None
- translation_model: typing.Optional[AsrPageRequestTranslationModel] = None
- output_format: typing.Optional[AsrPageRequestOutputFormat] = None
- google_translate_target: typing.Optional[str] = pydantic.Field(default=None)
- """
- use `translation_model` & `translation_target` instead.
- """
-
- translation_source: typing.Optional[str] = None
- translation_target: typing.Optional[str] = None
- glossary_document: typing.Optional[str] = pydantic.Field(default=None)
- """
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
- """
-
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/asr_page_request_output_format.py b/src/gooey/types/asr_page_request_output_format.py
deleted file mode 100644
index 101e681..0000000
--- a/src/gooey/types/asr_page_request_output_format.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-AsrPageRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any]
diff --git a/src/gooey/types/asr_page_request_translation_model.py b/src/gooey/types/asr_page_request_translation_model.py
deleted file mode 100644
index d5dcef6..0000000
--- a/src/gooey/types/asr_page_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-AsrPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/asr_page_response.py b/src/gooey/types/asr_page_response.py
deleted file mode 100644
index c3114d4..0000000
--- a/src/gooey/types/asr_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .asr_page_output import AsrPageOutput
-
-
-class AsrPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: AsrPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_art_qr_code.py b/src/gooey/types/body_async_form_art_qr_code.py
deleted file mode 100644
index 10f380c..0000000
--- a/src/gooey/types/body_async_form_art_qr_code.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormArtQrCode(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_asr.py b/src/gooey/types/body_async_form_asr.py
deleted file mode 100644
index ec63317..0000000
--- a/src/gooey/types/body_async_form_asr.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormAsr(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_bulk_eval.py b/src/gooey/types/body_async_form_bulk_eval.py
deleted file mode 100644
index 4bce6fa..0000000
--- a/src/gooey/types/body_async_form_bulk_eval.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormBulkEval(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_bulk_runner.py b/src/gooey/types/body_async_form_bulk_runner.py
deleted file mode 100644
index 1460309..0000000
--- a/src/gooey/types/body_async_form_bulk_runner.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormBulkRunner(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_chyron_plant.py b/src/gooey/types/body_async_form_chyron_plant.py
deleted file mode 100644
index 21b2f9d..0000000
--- a/src/gooey/types/body_async_form_chyron_plant.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormChyronPlant(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_compare_ai_upscalers.py b/src/gooey/types/body_async_form_compare_ai_upscalers.py
deleted file mode 100644
index 1379dcd..0000000
--- a/src/gooey/types/body_async_form_compare_ai_upscalers.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormCompareAiUpscalers(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_compare_llm.py b/src/gooey/types/body_async_form_compare_llm.py
deleted file mode 100644
index c9648d2..0000000
--- a/src/gooey/types/body_async_form_compare_llm.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormCompareLlm(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_compare_text2img.py b/src/gooey/types/body_async_form_compare_text2img.py
deleted file mode 100644
index c33c36e..0000000
--- a/src/gooey/types/body_async_form_compare_text2img.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormCompareText2Img(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_deforum_sd.py b/src/gooey/types/body_async_form_deforum_sd.py
deleted file mode 100644
index 3bc9b38..0000000
--- a/src/gooey/types/body_async_form_deforum_sd.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormDeforumSd(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_doc_extract.py b/src/gooey/types/body_async_form_doc_extract.py
deleted file mode 100644
index ac7eb62..0000000
--- a/src/gooey/types/body_async_form_doc_extract.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormDocExtract(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_doc_search.py b/src/gooey/types/body_async_form_doc_search.py
deleted file mode 100644
index 5f92368..0000000
--- a/src/gooey/types/body_async_form_doc_search.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormDocSearch(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_doc_summary.py b/src/gooey/types/body_async_form_doc_summary.py
deleted file mode 100644
index 9464de3..0000000
--- a/src/gooey/types/body_async_form_doc_summary.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormDocSummary(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_email_face_inpainting.py b/src/gooey/types/body_async_form_email_face_inpainting.py
deleted file mode 100644
index 73b8810..0000000
--- a/src/gooey/types/body_async_form_email_face_inpainting.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormEmailFaceInpainting(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_embeddings.py b/src/gooey/types/body_async_form_embeddings.py
deleted file mode 100644
index b2f780a..0000000
--- a/src/gooey/types/body_async_form_embeddings.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormEmbeddings(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_face_inpainting.py b/src/gooey/types/body_async_form_face_inpainting.py
deleted file mode 100644
index 335f399..0000000
--- a/src/gooey/types/body_async_form_face_inpainting.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormFaceInpainting(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_functions.py b/src/gooey/types/body_async_form_functions.py
deleted file mode 100644
index c9fe013..0000000
--- a/src/gooey/types/body_async_form_functions.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormFunctions(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_google_gpt.py b/src/gooey/types/body_async_form_google_gpt.py
deleted file mode 100644
index 20d2068..0000000
--- a/src/gooey/types/body_async_form_google_gpt.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormGoogleGpt(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_google_image_gen.py b/src/gooey/types/body_async_form_google_image_gen.py
deleted file mode 100644
index 1ca013e..0000000
--- a/src/gooey/types/body_async_form_google_image_gen.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormGoogleImageGen(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_image_segmentation.py b/src/gooey/types/body_async_form_image_segmentation.py
deleted file mode 100644
index 077a5a1..0000000
--- a/src/gooey/types/body_async_form_image_segmentation.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormImageSegmentation(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_img2img.py b/src/gooey/types/body_async_form_img2img.py
deleted file mode 100644
index ba1b683..0000000
--- a/src/gooey/types/body_async_form_img2img.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormImg2Img(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_letter_writer.py b/src/gooey/types/body_async_form_letter_writer.py
deleted file mode 100644
index c2e1d76..0000000
--- a/src/gooey/types/body_async_form_letter_writer.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormLetterWriter(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_lipsync.py b/src/gooey/types/body_async_form_lipsync.py
deleted file mode 100644
index a236a43..0000000
--- a/src/gooey/types/body_async_form_lipsync.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormLipsync(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_lipsync_tts.py b/src/gooey/types/body_async_form_lipsync_tts.py
deleted file mode 100644
index dc951af..0000000
--- a/src/gooey/types/body_async_form_lipsync_tts.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormLipsyncTts(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_object_inpainting.py b/src/gooey/types/body_async_form_object_inpainting.py
deleted file mode 100644
index ccfadef..0000000
--- a/src/gooey/types/body_async_form_object_inpainting.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormObjectInpainting(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_related_qna_maker.py b/src/gooey/types/body_async_form_related_qna_maker.py
deleted file mode 100644
index a59459f..0000000
--- a/src/gooey/types/body_async_form_related_qna_maker.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormRelatedQnaMaker(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_related_qna_maker_doc.py b/src/gooey/types/body_async_form_related_qna_maker_doc.py
deleted file mode 100644
index deb15bb..0000000
--- a/src/gooey/types/body_async_form_related_qna_maker_doc.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormRelatedQnaMakerDoc(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_seo_summary.py b/src/gooey/types/body_async_form_seo_summary.py
deleted file mode 100644
index 6a074ee..0000000
--- a/src/gooey/types/body_async_form_seo_summary.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormSeoSummary(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_smart_gpt.py b/src/gooey/types/body_async_form_smart_gpt.py
deleted file mode 100644
index e2f29f7..0000000
--- a/src/gooey/types/body_async_form_smart_gpt.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormSmartGpt(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_social_lookup_email.py b/src/gooey/types/body_async_form_social_lookup_email.py
deleted file mode 100644
index ce1890c..0000000
--- a/src/gooey/types/body_async_form_social_lookup_email.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormSocialLookupEmail(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_text2audio.py b/src/gooey/types/body_async_form_text2audio.py
deleted file mode 100644
index c6f38ef..0000000
--- a/src/gooey/types/body_async_form_text2audio.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormText2Audio(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_text_to_speech.py b/src/gooey/types/body_async_form_text_to_speech.py
deleted file mode 100644
index 6cbc13b..0000000
--- a/src/gooey/types/body_async_form_text_to_speech.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormTextToSpeech(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_translate.py b/src/gooey/types/body_async_form_translate.py
deleted file mode 100644
index 5434bc1..0000000
--- a/src/gooey/types/body_async_form_translate.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormTranslate(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/body_async_form_video_bots.py b/src/gooey/types/body_async_form_video_bots.py
deleted file mode 100644
index 706881d..0000000
--- a/src/gooey/types/body_async_form_video_bots.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class BodyAsyncFormVideoBots(UniversalBaseModel):
- json_: str = pydantic.Field(alias="json")
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/bulk_eval_page_request.py b/src/gooey/types/bulk_eval_page_request.py
deleted file mode 100644
index 9981bd3..0000000
--- a/src/gooey/types/bulk_eval_page_request.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .agg_function import AggFunction
-from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
-from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel
-from .eval_prompt import EvalPrompt
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class BulkEvalPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- documents: typing.List[str] = pydantic.Field()
- """
- Upload or link to a CSV or google sheet that contains your sample input data.
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
- Remember to includes header names in your CSV too.
- """
-
- eval_prompts: typing.Optional[typing.List[EvalPrompt]] = pydantic.Field(default=None)
- """
- Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
- _The `columns` dictionary can be used to reference the spreadsheet columns._
- """
-
- agg_functions: typing.Optional[typing.List[AggFunction]] = pydantic.Field(default=None)
- """
- Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
- """
-
- selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/bulk_eval_page_response.py b/src/gooey/types/bulk_eval_page_response.py
deleted file mode 100644
index c267909..0000000
--- a/src/gooey/types/bulk_eval_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .bulk_eval_page_output import BulkEvalPageOutput
-
-
-class BulkEvalPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: BulkEvalPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/bulk_runner_page_request.py b/src/gooey/types/bulk_runner_page_request.py
deleted file mode 100644
index d785c72..0000000
--- a/src/gooey/types/bulk_runner_page_request.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class BulkRunnerPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- documents: typing.List[str] = pydantic.Field()
- """
- Upload or link to a CSV or google sheet that contains your sample input data.
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
- Remember to includes header names in your CSV too.
- """
-
- run_urls: typing.List[str] = pydantic.Field()
- """
- Provide one or more Gooey.AI workflow runs.
- You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
- """
-
- input_columns: typing.Dict[str, str] = pydantic.Field()
- """
- For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
- """
-
- output_columns: typing.Dict[str, str] = pydantic.Field()
- """
- For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
- """
-
- eval_urls: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
- """
-
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/bulk_runner_page_response.py b/src/gooey/types/bulk_runner_page_response.py
deleted file mode 100644
index 7ba56fd..0000000
--- a/src/gooey/types/bulk_runner_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .bulk_runner_page_output import BulkRunnerPageOutput
-
-
-class BulkRunnerPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: BulkRunnerPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/chyron_plant_page_response.py b/src/gooey/types/chyron_plant_page_response.py
deleted file mode 100644
index a806978..0000000
--- a/src/gooey/types/chyron_plant_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .chyron_plant_page_output import ChyronPlantPageOutput
-
-
-class ChyronPlantPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: ChyronPlantPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_llm_page_request.py b/src/gooey/types/compare_llm_page_request.py
deleted file mode 100644
index 87ae925..0000000
--- a/src/gooey/types/compare_llm_page_request.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
-from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class CompareLlmPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_prompt: typing.Optional[str] = None
- selected_models: typing.Optional[typing.List[CompareLlmPageRequestSelectedModelsItem]] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_llm_page_response.py b/src/gooey/types/compare_llm_page_response.py
deleted file mode 100644
index 7acae74..0000000
--- a/src/gooey/types/compare_llm_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .compare_llm_page_output import CompareLlmPageOutput
-
-
-class CompareLlmPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: CompareLlmPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_text2img_page_request.py b/src/gooey/types/compare_text2img_page_request.py
deleted file mode 100644
index fbfeb11..0000000
--- a/src/gooey/types/compare_text2img_page_request.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler
-from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class CompareText2ImgPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- text_prompt: str
- negative_prompt: typing.Optional[str] = None
- output_width: typing.Optional[int] = None
- output_height: typing.Optional[int] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- dall_e3quality: typing.Optional[str] = pydantic.Field(alias="dall_e_3_quality", default=None)
- dall_e3style: typing.Optional[str] = pydantic.Field(alias="dall_e_3_style", default=None)
- guidance_scale: typing.Optional[float] = None
- seed: typing.Optional[int] = None
- sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)
- selected_models: typing.Optional[typing.List[CompareText2ImgPageRequestSelectedModelsItem]] = None
- scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = None
- edit_instruction: typing.Optional[str] = None
- image_guidance_scale: typing.Optional[float] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_text2img_page_response.py b/src/gooey/types/compare_text2img_page_response.py
deleted file mode 100644
index a11649a..0000000
--- a/src/gooey/types/compare_text2img_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .compare_text2img_page_output import CompareText2ImgPageOutput
-
-
-class CompareText2ImgPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: CompareText2ImgPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_upscaler_page_request.py b/src/gooey/types/compare_upscaler_page_request.py
deleted file mode 100644
index 00411a5..0000000
--- a/src/gooey/types/compare_upscaler_page_request.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class CompareUpscalerPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_image: typing.Optional[str] = pydantic.Field(default=None)
- """
- Input Image
- """
-
- input_video: typing.Optional[str] = pydantic.Field(default=None)
- """
- Input Video
- """
-
- scale: int = pydantic.Field()
- """
- The final upsampling scale of the image
- """
-
- selected_models: typing.Optional[typing.List[CompareUpscalerPageRequestSelectedModelsItem]] = None
- selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_upscaler_page_response.py b/src/gooey/types/compare_upscaler_page_response.py
deleted file mode 100644
index 4248486..0000000
--- a/src/gooey/types/compare_upscaler_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .compare_upscaler_page_output import CompareUpscalerPageOutput
-
-
-class CompareUpscalerPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: CompareUpscalerPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/deforum_sd_page_request.py b/src/gooey/types/deforum_sd_page_request.py
deleted file mode 100644
index 79f6d06..0000000
--- a/src/gooey/types/deforum_sd_page_request.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .animation_prompt import AnimationPrompt
-from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class DeforumSdPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- animation_prompts: typing.List[AnimationPrompt]
- max_frames: typing.Optional[int] = None
- selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = None
- animation_mode: typing.Optional[str] = None
- zoom: typing.Optional[str] = None
- translation_x: typing.Optional[str] = None
- translation_y: typing.Optional[str] = None
- rotation3d_x: typing.Optional[str] = pydantic.Field(alias="rotation_3d_x", default=None)
- rotation3d_y: typing.Optional[str] = pydantic.Field(alias="rotation_3d_y", default=None)
- rotation3d_z: typing.Optional[str] = pydantic.Field(alias="rotation_3d_z", default=None)
- fps: typing.Optional[int] = None
- seed: typing.Optional[int] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/deforum_sd_page_request_selected_model.py b/src/gooey/types/deforum_sd_page_request_selected_model.py
deleted file mode 100644
index 3af657a..0000000
--- a/src/gooey/types/deforum_sd_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DeforumSdPageRequestSelectedModel = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any]
diff --git a/src/gooey/types/deforum_sd_page_response.py b/src/gooey/types/deforum_sd_page_response.py
deleted file mode 100644
index a8f4a06..0000000
--- a/src/gooey/types/deforum_sd_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .deforum_sd_page_output import DeforumSdPageOutput
-
-
-class DeforumSdPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: DeforumSdPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py
deleted file mode 100644
index 1942904..0000000
--- a/src/gooey/types/doc_extract_page_request.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
-from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
-from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class DocExtractPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- documents: typing.List[str]
- sheet_url: typing.Optional[str] = None
- selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = None
- google_translate_target: typing.Optional[str] = None
- glossary_document: typing.Optional[str] = pydantic.Field(default=None)
- """
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
- """
-
- task_instructions: typing.Optional[str] = None
- selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_extract_page_request_response_format_type.py b/src/gooey/types/doc_extract_page_request_response_format_type.py
deleted file mode 100644
index 0ad7c14..0000000
--- a/src/gooey/types/doc_extract_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocExtractPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/doc_extract_page_response.py b/src/gooey/types/doc_extract_page_response.py
deleted file mode 100644
index 62a08d8..0000000
--- a/src/gooey/types/doc_extract_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .doc_extract_page_output import DocExtractPageOutput
-
-
-class DocExtractPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: DocExtractPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_search_page_request.py b/src/gooey/types/doc_search_page_request.py
deleted file mode 100644
index 73d4d6e..0000000
--- a/src/gooey/types/doc_search_page_request.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
-from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel
-from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
-from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType
-from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class DocSearchPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- search_query: str
- keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = None
- documents: typing.Optional[typing.List[str]] = None
- max_references: typing.Optional[int] = None
- max_context_words: typing.Optional[int] = None
- scroll_jump: typing.Optional[int] = None
- doc_extract_url: typing.Optional[str] = None
- embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = None
- dense_weight: typing.Optional[float] = pydantic.Field(default=None)
- """
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- """
-
- task_instructions: typing.Optional[str] = None
- query_instructions: typing.Optional[str] = None
- selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = None
- citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_search_page_request_response_format_type.py b/src/gooey/types/doc_search_page_request_response_format_type.py
deleted file mode 100644
index 856b641..0000000
--- a/src/gooey/types/doc_search_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSearchPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/doc_search_page_response.py b/src/gooey/types/doc_search_page_response.py
deleted file mode 100644
index f7f7cb9..0000000
--- a/src/gooey/types/doc_search_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .doc_search_page_output import DocSearchPageOutput
-
-
-class DocSearchPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: DocSearchPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py
deleted file mode 100644
index cb112fc..0000000
--- a/src/gooey/types/doc_summary_page_request.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
-from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
-from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class DocSummaryPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- documents: typing.List[str]
- task_instructions: typing.Optional[str] = None
- merge_instructions: typing.Optional[str] = None
- selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = None
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = None
- selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = None
- google_translate_target: typing.Optional[str] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_summary_page_request_response_format_type.py b/src/gooey/types/doc_summary_page_request_response_format_type.py
deleted file mode 100644
index 318ad7f..0000000
--- a/src/gooey/types/doc_summary_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/doc_summary_page_request_selected_model.py b/src/gooey/types/doc_summary_page_request_selected_model.py
deleted file mode 100644
index 6da70f6..0000000
--- a/src/gooey/types/doc_summary_page_request_selected_model.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSummaryPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "gpt_4_o",
- "gpt_4_o_mini",
- "chatgpt_4_o",
- "gpt_4_turbo_vision",
- "gpt_4_vision",
- "gpt_4_turbo",
- "gpt_4",
- "gpt_4_32k",
- "gpt_3_5_turbo",
- "gpt_3_5_turbo_16k",
- "gpt_3_5_turbo_instruct",
- "llama3_70b",
- "llama_3_groq_70b_tool_use",
- "llama3_8b",
- "llama_3_groq_8b_tool_use",
- "llama2_70b_chat",
- "mixtral_8x7b_instruct_0_1",
- "gemma_2_9b_it",
- "gemma_7b_it",
- "gemini_1_5_flash",
- "gemini_1_5_pro",
- "gemini_1_pro_vision",
- "gemini_1_pro",
- "palm2_chat",
- "palm2_text",
- "claude_3_5_sonnet",
- "claude_3_opus",
- "claude_3_sonnet",
- "claude_3_haiku",
- "sea_lion_7b_instruct",
- "llama3_8b_cpt_sea_lion_v2_instruct",
- "sarvam_2b",
- "text_davinci_003",
- "text_davinci_002",
- "code_davinci_002",
- "text_curie_001",
- "text_babbage_001",
- "text_ada_001",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/doc_summary_page_response.py b/src/gooey/types/doc_summary_page_response.py
deleted file mode 100644
index 854a6c4..0000000
--- a/src/gooey/types/doc_summary_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .doc_summary_page_output import DocSummaryPageOutput
-
-
-class DocSummaryPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: DocSummaryPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_summary_request_response_format_type.py b/src/gooey/types/doc_summary_request_response_format_type.py
new file mode 100644
index 0000000..8fabf9b
--- /dev/null
+++ b/src/gooey/types/doc_summary_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+DocSummaryRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_asr_model.py b/src/gooey/types/doc_summary_request_selected_asr_model.py
similarity index 90%
rename from src/gooey/types/video_bots_page_request_asr_model.py
rename to src/gooey/types/doc_summary_request_selected_asr_model.py
index 7db13bc..8b8a338 100644
--- a/src/gooey/types/video_bots_page_request_asr_model.py
+++ b/src/gooey/types/doc_summary_request_selected_asr_model.py
@@ -2,7 +2,7 @@
import typing
-VideoBotsPageRequestAsrModel = typing.Union[
+DocSummaryRequestSelectedAsrModel = typing.Union[
typing.Literal[
"whisper_large_v2",
"whisper_large_v3",
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py b/src/gooey/types/doc_summary_request_selected_model.py
similarity index 95%
rename from src/gooey/copilot_integrations/types/create_stream_request_selected_model.py
rename to src/gooey/types/doc_summary_request_selected_model.py
index 7227a94..db13c45 100644
--- a/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py
+++ b/src/gooey/types/doc_summary_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-CreateStreamRequestSelectedModel = typing.Union[
+DocSummaryRequestSelectedModel = typing.Union[
typing.Literal[
"gpt_4_o",
"gpt_4_o_mini",
diff --git a/src/gooey/types/email_face_inpainting_page_request.py b/src/gooey/types/email_face_inpainting_page_request.py
deleted file mode 100644
index 07f4660..0000000
--- a/src/gooey/types/email_face_inpainting_page_request.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class EmailFaceInpaintingPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- email_address: typing.Optional[str] = None
- twitter_handle: typing.Optional[str] = None
- text_prompt: str
- face_scale: typing.Optional[float] = None
- face_pos_x: typing.Optional[float] = None
- face_pos_y: typing.Optional[float] = None
- selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = None
- negative_prompt: typing.Optional[str] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- upscale_factor: typing.Optional[float] = None
- output_width: typing.Optional[int] = None
- output_height: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- should_send_email: typing.Optional[bool] = None
- email_from: typing.Optional[str] = None
- email_cc: typing.Optional[str] = None
- email_bcc: typing.Optional[str] = None
- email_subject: typing.Optional[str] = None
- email_body: typing.Optional[str] = None
- email_body_enable_html: typing.Optional[bool] = None
- fallback_email_body: typing.Optional[str] = None
- seed: typing.Optional[int] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/email_face_inpainting_page_request_selected_model.py b/src/gooey/types/email_face_inpainting_page_request_selected_model.py
deleted file mode 100644
index 822b5a6..0000000
--- a/src/gooey/types/email_face_inpainting_page_request_selected_model.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-EmailFaceInpaintingPageRequestSelectedModel = typing.Union[
- typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any
-]
diff --git a/src/gooey/types/email_face_inpainting_page_response.py b/src/gooey/types/email_face_inpainting_page_response.py
deleted file mode 100644
index 11e62fc..0000000
--- a/src/gooey/types/email_face_inpainting_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
-
-
-class EmailFaceInpaintingPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: EmailFaceInpaintingPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py b/src/gooey/types/embed_request_selected_model.py
similarity index 87%
rename from src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py
rename to src/gooey/types/embed_request_selected_model.py
index cef26bf..91f89cd 100644
--- a/src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py
+++ b/src/gooey/types/embed_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-CreateStreamRequestEmbeddingModel = typing.Union[
+EmbedRequestSelectedModel = typing.Union[
typing.Literal[
"openai_3_large",
"openai_3_small",
diff --git a/src/gooey/types/embeddings_page_request.py b/src/gooey/types/embeddings_page_request.py
deleted file mode 100644
index 9e67171..0000000
--- a/src/gooey/types/embeddings_page_request.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class EmbeddingsPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- texts: typing.List[str]
- selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/embeddings_page_response.py b/src/gooey/types/embeddings_page_response.py
deleted file mode 100644
index 8e49bfa..0000000
--- a/src/gooey/types/embeddings_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .embeddings_page_output import EmbeddingsPageOutput
-
-
-class EmbeddingsPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: EmbeddingsPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/face_inpainting_page_request.py b/src/gooey/types/face_inpainting_page_request.py
deleted file mode 100644
index 868b53b..0000000
--- a/src/gooey/types/face_inpainting_page_request.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class FaceInpaintingPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_image: str
- text_prompt: str
- face_scale: typing.Optional[float] = None
- face_pos_x: typing.Optional[float] = None
- face_pos_y: typing.Optional[float] = None
- selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = None
- negative_prompt: typing.Optional[str] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- upscale_factor: typing.Optional[float] = None
- output_width: typing.Optional[int] = None
- output_height: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- seed: typing.Optional[int] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/face_inpainting_page_response.py b/src/gooey/types/face_inpainting_page_response.py
deleted file mode 100644
index ebf2c0b..0000000
--- a/src/gooey/types/face_inpainting_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .face_inpainting_page_output import FaceInpaintingPageOutput
-
-
-class FaceInpaintingPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: FaceInpaintingPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/functions_page_request.py b/src/gooey/types/functions_page_request.py
deleted file mode 100644
index 30406dd..0000000
--- a/src/gooey/types/functions_page_request.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .run_settings import RunSettings
-
-
-class FunctionsPageRequest(UniversalBaseModel):
- code: typing.Optional[str] = pydantic.Field(default=None)
- """
- The JS code to be executed.
- """
-
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used in the code
- """
-
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/functions_page_response.py b/src/gooey/types/functions_page_response.py
deleted file mode 100644
index 8f9584a..0000000
--- a/src/gooey/types/functions_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .functions_page_output import FunctionsPageOutput
-
-
-class FunctionsPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: FunctionsPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/google_gpt_page_request.py b/src/gooey/types/google_gpt_page_request.py
deleted file mode 100644
index 9def494..0000000
--- a/src/gooey/types/google_gpt_page_request.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel
-from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType
-from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-from .serp_search_location import SerpSearchLocation
-from .serp_search_type import SerpSearchType
-
-
-class GoogleGptPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- search_query: str
- site_filter: str
- task_instructions: typing.Optional[str] = None
- query_instructions: typing.Optional[str] = None
- selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = None
- max_search_urls: typing.Optional[int] = None
- max_references: typing.Optional[int] = None
- max_context_words: typing.Optional[int] = None
- scroll_jump: typing.Optional[int] = None
- embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = None
- dense_weight: typing.Optional[float] = pydantic.Field(default=None)
- """
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- """
-
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = None
- serp_search_location: typing.Optional[SerpSearchLocation] = None
- scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- DEPRECATED: use `serp_search_location` instead
- """
-
- serp_search_type: typing.Optional[SerpSearchType] = None
- scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None)
- """
- DEPRECATED: use `serp_search_type` instead
- """
-
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/google_gpt_page_request_response_format_type.py b/src/gooey/types/google_gpt_page_request_response_format_type.py
deleted file mode 100644
index dd04dec..0000000
--- a/src/gooey/types/google_gpt_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-GoogleGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/google_gpt_page_response.py b/src/gooey/types/google_gpt_page_response.py
deleted file mode 100644
index 8bee3b3..0000000
--- a/src/gooey/types/google_gpt_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .google_gpt_page_output import GoogleGptPageOutput
-
-
-class GoogleGptPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: GoogleGptPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/google_image_gen_page_request.py b/src/gooey/types/google_image_gen_page_request.py
deleted file mode 100644
index 8e1360b..0000000
--- a/src/gooey/types/google_image_gen_page_request.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-from .serp_search_location import SerpSearchLocation
-
-
-class GoogleImageGenPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- serp_search_location: typing.Optional[SerpSearchLocation] = None
- scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- DEPRECATED: use `serp_search_location` instead
- """
-
- search_query: str
- text_prompt: str
- selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = None
- negative_prompt: typing.Optional[str] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- prompt_strength: typing.Optional[float] = None
- sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)
- seed: typing.Optional[int] = None
- image_guidance_scale: typing.Optional[float] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/google_image_gen_page_response.py b/src/gooey/types/google_image_gen_page_response.py
deleted file mode 100644
index bf820ef..0000000
--- a/src/gooey/types/google_image_gen_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .google_image_gen_page_output import GoogleImageGenPageOutput
-
-
-class GoogleImageGenPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: GoogleImageGenPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/face_inpainting_page_request_selected_model.py b/src/gooey/types/image_from_email_request_selected_model.py
similarity index 74%
rename from src/gooey/types/face_inpainting_page_request_selected_model.py
rename to src/gooey/types/image_from_email_request_selected_model.py
index 9b8eab6..ba5bb3f 100644
--- a/src/gooey/types/face_inpainting_page_request_selected_model.py
+++ b/src/gooey/types/image_from_email_request_selected_model.py
@@ -2,6 +2,6 @@
import typing
-FaceInpaintingPageRequestSelectedModel = typing.Union[
+ImageFromEmailRequestSelectedModel = typing.Union[
typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any
]
diff --git a/src/gooey/types/google_image_gen_page_request_selected_model.py b/src/gooey/types/image_from_web_search_request_selected_model.py
similarity index 88%
rename from src/gooey/types/google_image_gen_page_request_selected_model.py
rename to src/gooey/types/image_from_web_search_request_selected_model.py
index c872962..f4d498f 100644
--- a/src/gooey/types/google_image_gen_page_request_selected_model.py
+++ b/src/gooey/types/image_from_web_search_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-GoogleImageGenPageRequestSelectedModel = typing.Union[
+ImageFromWebSearchRequestSelectedModel = typing.Union[
typing.Literal[
"dream_shaper",
"dreamlike_2",
diff --git a/src/gooey/types/image_segmentation_page_request.py b/src/gooey/types/image_segmentation_page_request.py
deleted file mode 100644
index 9f2bc39..0000000
--- a/src/gooey/types/image_segmentation_page_request.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class ImageSegmentationPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_image: str
- selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = None
- mask_threshold: typing.Optional[float] = None
- rect_persepective_transform: typing.Optional[bool] = None
- reflection_opacity: typing.Optional[float] = None
- obj_scale: typing.Optional[float] = None
- obj_pos_x: typing.Optional[float] = None
- obj_pos_y: typing.Optional[float] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/image_segmentation_page_request_selected_model.py b/src/gooey/types/image_segmentation_page_request_selected_model.py
deleted file mode 100644
index 9b4b8d7..0000000
--- a/src/gooey/types/image_segmentation_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ImageSegmentationPageRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any]
diff --git a/src/gooey/types/image_segmentation_page_response.py b/src/gooey/types/image_segmentation_page_response.py
deleted file mode 100644
index 02a0c2e..0000000
--- a/src/gooey/types/image_segmentation_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .image_segmentation_page_output import ImageSegmentationPageOutput
-
-
-class ImageSegmentationPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: ImageSegmentationPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/img2img_page_request.py b/src/gooey/types/img2img_page_request.py
deleted file mode 100644
index 818cecb..0000000
--- a/src/gooey/types/img2img_page_request.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
-from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class Img2ImgPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_image: str
- text_prompt: typing.Optional[str] = None
- selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = None
- selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = None
- negative_prompt: typing.Optional[str] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- output_width: typing.Optional[int] = None
- output_height: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- prompt_strength: typing.Optional[float] = None
- controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
- seed: typing.Optional[int] = None
- image_guidance_scale: typing.Optional[float] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/img2img_page_response.py b/src/gooey/types/img2img_page_response.py
deleted file mode 100644
index 65ef7ba..0000000
--- a/src/gooey/types/img2img_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .img2img_page_output import Img2ImgPageOutput
-
-
-class Img2ImgPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: Img2ImgPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/letter_writer_page_response.py b/src/gooey/types/letter_writer_page_response.py
deleted file mode 100644
index 58eefa9..0000000
--- a/src/gooey/types/letter_writer_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .letter_writer_page_output import LetterWriterPageOutput
-
-
-class LetterWriterPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: LetterWriterPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_page_request.py b/src/gooey/types/lipsync_page_request.py
deleted file mode 100644
index 89840ab..0000000
--- a/src/gooey/types/lipsync_page_request.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-from .sad_talker_settings import SadTalkerSettings
-
-
-class LipsyncPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_face: typing.Optional[str] = None
- face_padding_top: typing.Optional[int] = None
- face_padding_bottom: typing.Optional[int] = None
- face_padding_left: typing.Optional[int] = None
- face_padding_right: typing.Optional[int] = None
- sadtalker_settings: typing.Optional[SadTalkerSettings] = None
- selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = None
- input_audio: typing.Optional[str] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_page_request_selected_model.py b/src/gooey/types/lipsync_page_request_selected_model.py
deleted file mode 100644
index da68ef8..0000000
--- a/src/gooey/types/lipsync_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_page_response.py b/src/gooey/types/lipsync_page_response.py
deleted file mode 100644
index 5a17ebf..0000000
--- a/src/gooey/types/lipsync_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .lipsync_page_output import LipsyncPageOutput
-
-
-class LipsyncPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: LipsyncPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py
deleted file mode 100644
index 31cdcd5..0000000
--- a/src/gooey/types/lipsync_tts_page_request.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
-from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
-from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
-from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-from .sad_talker_settings import SadTalkerSettings
-
-
-class LipsyncTtsPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- text_prompt: str
- tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = None
- uberduck_voice_name: typing.Optional[str] = None
- uberduck_speaking_rate: typing.Optional[float] = None
- google_voice_name: typing.Optional[str] = None
- google_speaking_rate: typing.Optional[float] = None
- google_pitch: typing.Optional[float] = None
- bark_history_prompt: typing.Optional[str] = None
- elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None)
- """
- Use `elevenlabs_voice_id` instead
- """
-
- elevenlabs_api_key: typing.Optional[str] = None
- elevenlabs_voice_id: typing.Optional[str] = None
- elevenlabs_model: typing.Optional[str] = None
- elevenlabs_stability: typing.Optional[float] = None
- elevenlabs_similarity_boost: typing.Optional[float] = None
- elevenlabs_style: typing.Optional[float] = None
- elevenlabs_speaker_boost: typing.Optional[bool] = None
- azure_voice_name: typing.Optional[str] = None
- openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = None
- openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = None
- input_face: typing.Optional[str] = None
- face_padding_top: typing.Optional[int] = None
- face_padding_bottom: typing.Optional[int] = None
- face_padding_left: typing.Optional[int] = None
- face_padding_right: typing.Optional[int] = None
- sadtalker_settings: typing.Optional[SadTalkerSettings] = None
- selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py b/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py
deleted file mode 100644
index 453ab4a..0000000
--- a/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncTtsPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_page_request_selected_model.py b/src/gooey/types/lipsync_tts_page_request_selected_model.py
deleted file mode 100644
index 538058b..0000000
--- a/src/gooey/types/lipsync_tts_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncTtsPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_page_response.py b/src/gooey/types/lipsync_tts_page_response.py
deleted file mode 100644
index 111f049..0000000
--- a/src/gooey/types/lipsync_tts_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .lipsync_tts_page_output import LipsyncTtsPageOutput
-
-
-class LipsyncTtsPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: LipsyncTtsPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_tts_request_openai_tts_model.py b/src/gooey/types/lipsync_tts_request_openai_tts_model.py
new file mode 100644
index 0000000..510dcfb
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_request_openai_tts_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncTtsRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py b/src/gooey/types/lipsync_tts_request_openai_voice_name.py
similarity index 76%
rename from src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py
rename to src/gooey/types/lipsync_tts_request_openai_voice_name.py
index 4f3dd7a..7ea601b 100644
--- a/src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py
+++ b/src/gooey/types/lipsync_tts_request_openai_voice_name.py
@@ -2,6 +2,6 @@
import typing
-CreateStreamRequestOpenaiVoiceName = typing.Union[
+LipsyncTtsRequestOpenaiVoiceName = typing.Union[
typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
]
diff --git a/src/gooey/types/lipsync_tts_request_selected_model.py b/src/gooey/types/lipsync_tts_request_selected_model.py
new file mode 100644
index 0000000..9ece5a9
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncTtsRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py b/src/gooey/types/lipsync_tts_request_tts_provider.py
similarity index 79%
rename from src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py
rename to src/gooey/types/lipsync_tts_request_tts_provider.py
index cad602d..1a23fe3 100644
--- a/src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py
+++ b/src/gooey/types/lipsync_tts_request_tts_provider.py
@@ -2,6 +2,6 @@
import typing
-CreateStreamRequestTtsProvider = typing.Union[
+LipsyncTtsRequestTtsProvider = typing.Union[
typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
]
diff --git a/src/gooey/types/llm_request_response_format_type.py b/src/gooey/types/llm_request_response_format_type.py
new file mode 100644
index 0000000..aa0e5e2
--- /dev/null
+++ b/src/gooey/types/llm_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LlmRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/smart_gpt_page_request_selected_model.py b/src/gooey/types/llm_request_selected_models_item.py
similarity index 95%
rename from src/gooey/types/smart_gpt_page_request_selected_model.py
rename to src/gooey/types/llm_request_selected_models_item.py
index 9142b8f..019f9b9 100644
--- a/src/gooey/types/smart_gpt_page_request_selected_model.py
+++ b/src/gooey/types/llm_request_selected_models_item.py
@@ -2,7 +2,7 @@
import typing
-SmartGptPageRequestSelectedModel = typing.Union[
+LlmRequestSelectedModelsItem = typing.Union[
typing.Literal[
"gpt_4_o",
"gpt_4_o_mini",
diff --git a/src/gooey/types/object_inpainting_page_request.py b/src/gooey/types/object_inpainting_page_request.py
deleted file mode 100644
index 3b1cbc5..0000000
--- a/src/gooey/types/object_inpainting_page_request.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class ObjectInpaintingPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_image: str
- text_prompt: str
- obj_scale: typing.Optional[float] = None
- obj_pos_x: typing.Optional[float] = None
- obj_pos_y: typing.Optional[float] = None
- mask_threshold: typing.Optional[float] = None
- selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = None
- negative_prompt: typing.Optional[str] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- output_width: typing.Optional[int] = None
- output_height: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)
- seed: typing.Optional[int] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/object_inpainting_page_request_selected_model.py b/src/gooey/types/object_inpainting_page_request_selected_model.py
deleted file mode 100644
index 92f1302..0000000
--- a/src/gooey/types/object_inpainting_page_request_selected_model.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ObjectInpaintingPageRequestSelectedModel = typing.Union[
- typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any
-]
diff --git a/src/gooey/types/object_inpainting_page_response.py b/src/gooey/types/object_inpainting_page_response.py
deleted file mode 100644
index 0104eeb..0000000
--- a/src/gooey/types/object_inpainting_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .object_inpainting_page_output import ObjectInpaintingPageOutput
-
-
-class ObjectInpaintingPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: ObjectInpaintingPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/personalize_email_request_response_format_type.py b/src/gooey/types/personalize_email_request_response_format_type.py
new file mode 100644
index 0000000..1bedf2e
--- /dev/null
+++ b/src/gooey/types/personalize_email_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PersonalizeEmailRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/doc_extract_page_request_selected_model.py b/src/gooey/types/personalize_email_request_selected_model.py
similarity index 95%
rename from src/gooey/types/doc_extract_page_request_selected_model.py
rename to src/gooey/types/personalize_email_request_selected_model.py
index 1872929..3a01b07 100644
--- a/src/gooey/types/doc_extract_page_request_selected_model.py
+++ b/src/gooey/types/personalize_email_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-DocExtractPageRequestSelectedModel = typing.Union[
+PersonalizeEmailRequestSelectedModel = typing.Union[
typing.Literal[
"gpt_4_o",
"gpt_4_o_mini",
diff --git a/src/gooey/types/portrait_request_selected_model.py b/src/gooey/types/portrait_request_selected_model.py
new file mode 100644
index 0000000..6c4a5ce
--- /dev/null
+++ b/src/gooey/types/portrait_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PortraitRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any]
diff --git a/src/gooey/types/product_image_request_selected_model.py b/src/gooey/types/product_image_request_selected_model.py
new file mode 100644
index 0000000..f1ce039
--- /dev/null
+++ b/src/gooey/types/product_image_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProductImageRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any]
diff --git a/src/gooey/types/qr_code_generator_page_request.py b/src/gooey/types/qr_code_generator_page_request.py
deleted file mode 100644
index 6ebb5c4..0000000
--- a/src/gooey/types/qr_code_generator_page_request.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
- QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
-)
-from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler
-from .qr_code_generator_page_request_selected_controlnet_model_item import (
- QrCodeGeneratorPageRequestSelectedControlnetModelItem,
-)
-from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-from .vcard import Vcard
-
-
-class QrCodeGeneratorPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- qr_code_data: typing.Optional[str] = None
- qr_code_input_image: typing.Optional[str] = None
- qr_code_vcard: typing.Optional[Vcard] = None
- qr_code_file: typing.Optional[str] = None
- use_url_shortener: typing.Optional[bool] = None
- text_prompt: str
- negative_prompt: typing.Optional[str] = None
- image_prompt: typing.Optional[str] = None
- image_prompt_controlnet_models: typing.Optional[
- typing.List[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
- ] = None
- image_prompt_strength: typing.Optional[float] = None
- image_prompt_scale: typing.Optional[float] = None
- image_prompt_pos_x: typing.Optional[float] = None
- image_prompt_pos_y: typing.Optional[float] = None
- selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = None
- selected_controlnet_model: typing.Optional[
- typing.List[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
- ] = None
- output_width: typing.Optional[int] = None
- output_height: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = None
- seed: typing.Optional[int] = None
- obj_scale: typing.Optional[float] = None
- obj_pos_x: typing.Optional[float] = None
- obj_pos_y: typing.Optional[float] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/qr_code_generator_page_response.py b/src/gooey/types/qr_code_generator_page_response.py
deleted file mode 100644
index 5dbedbb..0000000
--- a/src/gooey/types/qr_code_generator_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .qr_code_generator_page_output import QrCodeGeneratorPageOutput
-
-
-class QrCodeGeneratorPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: QrCodeGeneratorPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
similarity index 88%
rename from src/gooey/types/img2img_page_request_selected_controlnet_model_item.py
rename to src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
index 1569cf5..3be2ab6 100644
--- a/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py
+++ b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
@@ -2,7 +2,7 @@
import typing
-Img2ImgPageRequestSelectedControlnetModelItem = typing.Union[
+QrCodeRequestImagePromptControlnetModelsItem = typing.Union[
typing.Literal[
"sd_controlnet_canny",
"sd_controlnet_depth",
diff --git a/src/gooey/types/qr_code_generator_page_request_scheduler.py b/src/gooey/types/qr_code_request_scheduler.py
similarity index 89%
rename from src/gooey/types/qr_code_generator_page_request_scheduler.py
rename to src/gooey/types/qr_code_request_scheduler.py
index e30308a..890b204 100644
--- a/src/gooey/types/qr_code_generator_page_request_scheduler.py
+++ b/src/gooey/types/qr_code_request_scheduler.py
@@ -2,7 +2,7 @@
import typing
-QrCodeGeneratorPageRequestScheduler = typing.Union[
+QrCodeRequestScheduler = typing.Union[
typing.Literal[
"singlestep_dpm_solver",
"multistep_dpm_solver",
diff --git a/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py b/src/gooey/types/qr_code_request_selected_controlnet_model_item.py
similarity index 87%
rename from src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py
rename to src/gooey/types/qr_code_request_selected_controlnet_model_item.py
index c6f1967..c5cdc8d 100644
--- a/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py
+++ b/src/gooey/types/qr_code_request_selected_controlnet_model_item.py
@@ -2,7 +2,7 @@
import typing
-QrCodeGeneratorPageRequestSelectedControlnetModelItem = typing.Union[
+QrCodeRequestSelectedControlnetModelItem = typing.Union[
typing.Literal[
"sd_controlnet_canny",
"sd_controlnet_depth",
diff --git a/src/gooey/types/qr_code_generator_page_request_selected_model.py b/src/gooey/types/qr_code_request_selected_model.py
similarity index 88%
rename from src/gooey/types/qr_code_generator_page_request_selected_model.py
rename to src/gooey/types/qr_code_request_selected_model.py
index 97282cb..7ea963c 100644
--- a/src/gooey/types/qr_code_generator_page_request_selected_model.py
+++ b/src/gooey/types/qr_code_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-QrCodeGeneratorPageRequestSelectedModel = typing.Union[
+QrCodeRequestSelectedModel = typing.Union[
typing.Literal[
"dream_shaper",
"dreamlike_2",
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_citation_style.py b/src/gooey/types/rag_request_citation_style.py
similarity index 90%
rename from src/gooey/copilot_integrations/types/create_stream_request_citation_style.py
rename to src/gooey/types/rag_request_citation_style.py
index e57bab1..521a218 100644
--- a/src/gooey/copilot_integrations/types/create_stream_request_citation_style.py
+++ b/src/gooey/types/rag_request_citation_style.py
@@ -2,7 +2,7 @@
import typing
-CreateStreamRequestCitationStyle = typing.Union[
+RagRequestCitationStyle = typing.Union[
typing.Literal[
"number",
"title",
diff --git a/src/gooey/types/google_gpt_page_request_embedding_model.py b/src/gooey/types/rag_request_embedding_model.py
similarity index 87%
rename from src/gooey/types/google_gpt_page_request_embedding_model.py
rename to src/gooey/types/rag_request_embedding_model.py
index 66f060f..0b9fb13 100644
--- a/src/gooey/types/google_gpt_page_request_embedding_model.py
+++ b/src/gooey/types/rag_request_embedding_model.py
@@ -2,7 +2,7 @@
import typing
-GoogleGptPageRequestEmbeddingModel = typing.Union[
+RagRequestEmbeddingModel = typing.Union[
typing.Literal[
"openai_3_large",
"openai_3_small",
diff --git a/src/gooey/types/doc_search_page_request_keyword_query.py b/src/gooey/types/rag_request_keyword_query.py
similarity index 52%
rename from src/gooey/types/doc_search_page_request_keyword_query.py
rename to src/gooey/types/rag_request_keyword_query.py
index 8083b3d..894beca 100644
--- a/src/gooey/types/doc_search_page_request_keyword_query.py
+++ b/src/gooey/types/rag_request_keyword_query.py
@@ -2,4 +2,4 @@
import typing
-DocSearchPageRequestKeywordQuery = typing.Union[str, typing.List[str]]
+RagRequestKeywordQuery = typing.Union[str, typing.List[str]]
diff --git a/src/gooey/types/rag_request_response_format_type.py b/src/gooey/types/rag_request_response_format_type.py
new file mode 100644
index 0000000..76eae86
--- /dev/null
+++ b/src/gooey/types/rag_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RagRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/bulk_eval_page_request_selected_model.py b/src/gooey/types/rag_request_selected_model.py
similarity index 95%
rename from src/gooey/types/bulk_eval_page_request_selected_model.py
rename to src/gooey/types/rag_request_selected_model.py
index 853cf33..8904215 100644
--- a/src/gooey/types/bulk_eval_page_request_selected_model.py
+++ b/src/gooey/types/rag_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-BulkEvalPageRequestSelectedModel = typing.Union[
+RagRequestSelectedModel = typing.Union[
typing.Literal[
"gpt_4_o",
"gpt_4_o_mini",
diff --git a/src/gooey/types/related_qn_a_doc_page_request.py b/src/gooey/types/related_qn_a_doc_page_request.py
deleted file mode 100644
index b898b4f..0000000
--- a/src/gooey/types/related_qn_a_doc_page_request.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .recipe_function import RecipeFunction
-from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
-from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel
-from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
-from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType
-from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel
-from .run_settings import RunSettings
-from .serp_search_location import SerpSearchLocation
-from .serp_search_type import SerpSearchType
-
-
-class RelatedQnADocPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- search_query: str
- keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = None
- documents: typing.Optional[typing.List[str]] = None
- max_references: typing.Optional[int] = None
- max_context_words: typing.Optional[int] = None
- scroll_jump: typing.Optional[int] = None
- doc_extract_url: typing.Optional[str] = None
- embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = None
- dense_weight: typing.Optional[float] = pydantic.Field(default=None)
- """
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- """
-
- task_instructions: typing.Optional[str] = None
- query_instructions: typing.Optional[str] = None
- selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = None
- citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = None
- serp_search_location: typing.Optional[SerpSearchLocation] = None
- scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- DEPRECATED: use `serp_search_location` instead
- """
-
- serp_search_type: typing.Optional[SerpSearchType] = None
- scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None)
- """
- DEPRECATED: use `serp_search_type` instead
- """
-
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py b/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py
deleted file mode 100644
index 680bbb5..0000000
--- a/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RelatedQnADocPageRequestEmbeddingModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/related_qn_a_doc_page_request_selected_model.py b/src/gooey/types/related_qn_a_doc_page_request_selected_model.py
deleted file mode 100644
index 2591cf1..0000000
--- a/src/gooey/types/related_qn_a_doc_page_request_selected_model.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RelatedQnADocPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "gpt_4_o",
- "gpt_4_o_mini",
- "chatgpt_4_o",
- "gpt_4_turbo_vision",
- "gpt_4_vision",
- "gpt_4_turbo",
- "gpt_4",
- "gpt_4_32k",
- "gpt_3_5_turbo",
- "gpt_3_5_turbo_16k",
- "gpt_3_5_turbo_instruct",
- "llama3_70b",
- "llama_3_groq_70b_tool_use",
- "llama3_8b",
- "llama_3_groq_8b_tool_use",
- "llama2_70b_chat",
- "mixtral_8x7b_instruct_0_1",
- "gemma_2_9b_it",
- "gemma_7b_it",
- "gemini_1_5_flash",
- "gemini_1_5_pro",
- "gemini_1_pro_vision",
- "gemini_1_pro",
- "palm2_chat",
- "palm2_text",
- "claude_3_5_sonnet",
- "claude_3_opus",
- "claude_3_sonnet",
- "claude_3_haiku",
- "sea_lion_7b_instruct",
- "llama3_8b_cpt_sea_lion_v2_instruct",
- "sarvam_2b",
- "text_davinci_003",
- "text_davinci_002",
- "code_davinci_002",
- "text_curie_001",
- "text_babbage_001",
- "text_ada_001",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/related_qn_a_doc_page_response.py b/src/gooey/types/related_qn_a_doc_page_response.py
deleted file mode 100644
index db98cc6..0000000
--- a/src/gooey/types/related_qn_a_doc_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .related_qn_a_doc_page_output import RelatedQnADocPageOutput
-
-
-class RelatedQnADocPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: RelatedQnADocPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/related_qn_a_page_request.py b/src/gooey/types/related_qn_a_page_request.py
deleted file mode 100644
index 3491f18..0000000
--- a/src/gooey/types/related_qn_a_page_request.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .recipe_function import RecipeFunction
-from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
-from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
-from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel
-from .run_settings import RunSettings
-from .serp_search_location import SerpSearchLocation
-from .serp_search_type import SerpSearchType
-
-
-class RelatedQnAPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- search_query: str
- site_filter: str
- task_instructions: typing.Optional[str] = None
- query_instructions: typing.Optional[str] = None
- selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = None
- max_search_urls: typing.Optional[int] = None
- max_references: typing.Optional[int] = None
- max_context_words: typing.Optional[int] = None
- scroll_jump: typing.Optional[int] = None
- embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = None
- dense_weight: typing.Optional[float] = pydantic.Field(default=None)
- """
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- """
-
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = None
- serp_search_location: typing.Optional[SerpSearchLocation] = None
- scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- DEPRECATED: use `serp_search_location` instead
- """
-
- serp_search_type: typing.Optional[SerpSearchType] = None
- scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None)
- """
- DEPRECATED: use `serp_search_type` instead
- """
-
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/related_qn_a_page_request_embedding_model.py b/src/gooey/types/related_qn_a_page_request_embedding_model.py
deleted file mode 100644
index a591920..0000000
--- a/src/gooey/types/related_qn_a_page_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RelatedQnAPageRequestEmbeddingModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/related_qn_a_page_request_response_format_type.py b/src/gooey/types/related_qn_a_page_request_response_format_type.py
deleted file mode 100644
index 7bada87..0000000
--- a/src/gooey/types/related_qn_a_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RelatedQnAPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/related_qn_a_page_request_selected_model.py b/src/gooey/types/related_qn_a_page_request_selected_model.py
deleted file mode 100644
index 211bdbc..0000000
--- a/src/gooey/types/related_qn_a_page_request_selected_model.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RelatedQnAPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "gpt_4_o",
- "gpt_4_o_mini",
- "chatgpt_4_o",
- "gpt_4_turbo_vision",
- "gpt_4_vision",
- "gpt_4_turbo",
- "gpt_4",
- "gpt_4_32k",
- "gpt_3_5_turbo",
- "gpt_3_5_turbo_16k",
- "gpt_3_5_turbo_instruct",
- "llama3_70b",
- "llama_3_groq_70b_tool_use",
- "llama3_8b",
- "llama_3_groq_8b_tool_use",
- "llama2_70b_chat",
- "mixtral_8x7b_instruct_0_1",
- "gemma_2_9b_it",
- "gemma_7b_it",
- "gemini_1_5_flash",
- "gemini_1_5_pro",
- "gemini_1_pro_vision",
- "gemini_1_pro",
- "palm2_chat",
- "palm2_text",
- "claude_3_5_sonnet",
- "claude_3_opus",
- "claude_3_sonnet",
- "claude_3_haiku",
- "sea_lion_7b_instruct",
- "llama3_8b_cpt_sea_lion_v2_instruct",
- "sarvam_2b",
- "text_davinci_003",
- "text_davinci_002",
- "code_davinci_002",
- "text_curie_001",
- "text_babbage_001",
- "text_ada_001",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/related_qn_a_page_response.py b/src/gooey/types/related_qn_a_page_response.py
deleted file mode 100644
index 907a9a2..0000000
--- a/src/gooey/types/related_qn_a_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .related_qn_a_page_output import RelatedQnAPageOutput
-
-
-class RelatedQnAPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: RelatedQnAPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/img2img_page_request_selected_controlnet_model.py b/src/gooey/types/remix_image_request_selected_controlnet_model.py
similarity index 71%
rename from src/gooey/types/img2img_page_request_selected_controlnet_model.py
rename to src/gooey/types/remix_image_request_selected_controlnet_model.py
index dc17cc4..1b60b48 100644
--- a/src/gooey/types/img2img_page_request_selected_controlnet_model.py
+++ b/src/gooey/types/remix_image_request_selected_controlnet_model.py
@@ -2,10 +2,10 @@
import typing
-from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem
+from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem
-Img2ImgPageRequestSelectedControlnetModel = typing.Union[
- typing.List[Img2ImgPageRequestSelectedControlnetModelItem],
+RemixImageRequestSelectedControlnetModel = typing.Union[
+ typing.List[RemixImageRequestSelectedControlnetModelItem],
typing.Literal["sd_controlnet_canny"],
typing.Literal["sd_controlnet_depth"],
typing.Literal["sd_controlnet_hed"],
diff --git a/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py
similarity index 86%
rename from src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py
rename to src/gooey/types/remix_image_request_selected_controlnet_model_item.py
index 508e7e9..b4f3ff0 100644
--- a/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py
+++ b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py
@@ -2,7 +2,7 @@
import typing
-QrCodeGeneratorPageRequestImagePromptControlnetModelsItem = typing.Union[
+RemixImageRequestSelectedControlnetModelItem = typing.Union[
typing.Literal[
"sd_controlnet_canny",
"sd_controlnet_depth",
diff --git a/src/gooey/types/img2img_page_request_selected_model.py b/src/gooey/types/remix_image_request_selected_model.py
similarity index 89%
rename from src/gooey/types/img2img_page_request_selected_model.py
rename to src/gooey/types/remix_image_request_selected_model.py
index 506c2b1..245d6b0 100644
--- a/src/gooey/types/img2img_page_request_selected_model.py
+++ b/src/gooey/types/remix_image_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-Img2ImgPageRequestSelectedModel = typing.Union[
+RemixImageRequestSelectedModel = typing.Union[
typing.Literal[
"dream_shaper",
"dreamlike_2",
diff --git a/src/gooey/types/remove_background_request_selected_model.py b/src/gooey/types/remove_background_request_selected_model.py
new file mode 100644
index 0000000..c84f0e7
--- /dev/null
+++ b/src/gooey/types/remove_background_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RemoveBackgroundRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any]
diff --git a/src/gooey/types/seo_content_request_response_format_type.py b/src/gooey/types/seo_content_request_response_format_type.py
new file mode 100644
index 0000000..8511b19
--- /dev/null
+++ b/src/gooey/types/seo_content_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SeoContentRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/google_gpt_page_request_selected_model.py b/src/gooey/types/seo_content_request_selected_model.py
similarity index 95%
rename from src/gooey/types/google_gpt_page_request_selected_model.py
rename to src/gooey/types/seo_content_request_selected_model.py
index 719ae61..f2d129a 100644
--- a/src/gooey/types/google_gpt_page_request_selected_model.py
+++ b/src/gooey/types/seo_content_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-GoogleGptPageRequestSelectedModel = typing.Union[
+SeoContentRequestSelectedModel = typing.Union[
typing.Literal[
"gpt_4_o",
"gpt_4_o_mini",
diff --git a/src/gooey/types/related_qn_a_doc_page_request_citation_style.py b/src/gooey/types/seo_people_also_ask_doc_request_citation_style.py
similarity index 89%
rename from src/gooey/types/related_qn_a_doc_page_request_citation_style.py
rename to src/gooey/types/seo_people_also_ask_doc_request_citation_style.py
index b98f002..c5aaac3 100644
--- a/src/gooey/types/related_qn_a_doc_page_request_citation_style.py
+++ b/src/gooey/types/seo_people_also_ask_doc_request_citation_style.py
@@ -2,7 +2,7 @@
import typing
-RelatedQnADocPageRequestCitationStyle = typing.Union[
+SeoPeopleAlsoAskDocRequestCitationStyle = typing.Union[
typing.Literal[
"number",
"title",
diff --git a/src/gooey/types/seo_people_also_ask_doc_request_embedding_model.py b/src/gooey/types/seo_people_also_ask_doc_request_embedding_model.py
new file mode 100644
index 0000000..0628779
--- /dev/null
+++ b/src/gooey/types/seo_people_also_ask_doc_request_embedding_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SeoPeopleAlsoAskDocRequestEmbeddingModel = typing.Union[
+ typing.Literal[
+ "openai_3_large",
+ "openai_3_small",
+ "openai_ada_2",
+ "e5_large_v2",
+ "e5_base_v2",
+ "multilingual_e5_base",
+ "multilingual_e5_large",
+ "gte_large",
+ "gte_base",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/related_qn_a_doc_page_request_keyword_query.py b/src/gooey/types/seo_people_also_ask_doc_request_keyword_query.py
similarity index 50%
rename from src/gooey/types/related_qn_a_doc_page_request_keyword_query.py
rename to src/gooey/types/seo_people_also_ask_doc_request_keyword_query.py
index 4f35322..8ba6efb 100644
--- a/src/gooey/types/related_qn_a_doc_page_request_keyword_query.py
+++ b/src/gooey/types/seo_people_also_ask_doc_request_keyword_query.py
@@ -2,4 +2,4 @@
import typing
-RelatedQnADocPageRequestKeywordQuery = typing.Union[str, typing.List[str]]
+SeoPeopleAlsoAskDocRequestKeywordQuery = typing.Union[str, typing.List[str]]
diff --git a/src/gooey/types/seo_people_also_ask_doc_request_response_format_type.py b/src/gooey/types/seo_people_also_ask_doc_request_response_format_type.py
new file mode 100644
index 0000000..c205eff
--- /dev/null
+++ b/src/gooey/types/seo_people_also_ask_doc_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SeoPeopleAlsoAskDocRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/compare_llm_page_request_selected_models_item.py b/src/gooey/types/seo_people_also_ask_doc_request_selected_model.py
similarity index 95%
rename from src/gooey/types/compare_llm_page_request_selected_models_item.py
rename to src/gooey/types/seo_people_also_ask_doc_request_selected_model.py
index d3564b6..1877420 100644
--- a/src/gooey/types/compare_llm_page_request_selected_models_item.py
+++ b/src/gooey/types/seo_people_also_ask_doc_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-CompareLlmPageRequestSelectedModelsItem = typing.Union[
+SeoPeopleAlsoAskDocRequestSelectedModel = typing.Union[
typing.Literal[
"gpt_4_o",
"gpt_4_o_mini",
diff --git a/src/gooey/types/doc_search_page_request_embedding_model.py b/src/gooey/types/seo_people_also_ask_request_embedding_model.py
similarity index 86%
rename from src/gooey/types/doc_search_page_request_embedding_model.py
rename to src/gooey/types/seo_people_also_ask_request_embedding_model.py
index fb35612..9ab6037 100644
--- a/src/gooey/types/doc_search_page_request_embedding_model.py
+++ b/src/gooey/types/seo_people_also_ask_request_embedding_model.py
@@ -2,7 +2,7 @@
import typing
-DocSearchPageRequestEmbeddingModel = typing.Union[
+SeoPeopleAlsoAskRequestEmbeddingModel = typing.Union[
typing.Literal[
"openai_3_large",
"openai_3_small",
diff --git a/src/gooey/types/seo_people_also_ask_request_response_format_type.py b/src/gooey/types/seo_people_also_ask_request_response_format_type.py
new file mode 100644
index 0000000..5a67007
--- /dev/null
+++ b/src/gooey/types/seo_people_also_ask_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SeoPeopleAlsoAskRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/seo_people_also_ask_request_selected_model.py b/src/gooey/types/seo_people_also_ask_request_selected_model.py
new file mode 100644
index 0000000..e315d12
--- /dev/null
+++ b/src/gooey/types/seo_people_also_ask_request_selected_model.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SeoPeopleAlsoAskRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_o_mini",
+ "chatgpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama_3_groq_70b_tool_use",
+ "llama3_8b",
+ "llama_3_groq_8b_tool_use",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_2_9b_it",
+ "gemma_7b_it",
+ "gemini_1_5_flash",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "llama3_8b_cpt_sea_lion_v2_instruct",
+ "sarvam_2b",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/seo_summary_page_request.py b/src/gooey/types/seo_summary_page_request.py
deleted file mode 100644
index 12121af..0000000
--- a/src/gooey/types/seo_summary_page_request.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .run_settings import RunSettings
-from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType
-from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel
-from .serp_search_location import SerpSearchLocation
-from .serp_search_type import SerpSearchType
-
-
-class SeoSummaryPageRequest(UniversalBaseModel):
- search_query: str
- keywords: str
- title: str
- company_url: str
- task_instructions: typing.Optional[str] = None
- enable_html: typing.Optional[bool] = None
- selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = None
- max_search_urls: typing.Optional[int] = None
- enable_crosslinks: typing.Optional[bool] = None
- seed: typing.Optional[int] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = None
- serp_search_location: typing.Optional[SerpSearchLocation] = None
- scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- DEPRECATED: use `serp_search_location` instead
- """
-
- serp_search_type: typing.Optional[SerpSearchType] = None
- scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None)
- """
- DEPRECATED: use `serp_search_type` instead
- """
-
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/seo_summary_page_request_response_format_type.py b/src/gooey/types/seo_summary_page_request_response_format_type.py
deleted file mode 100644
index 26f948b..0000000
--- a/src/gooey/types/seo_summary_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SeoSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/seo_summary_page_request_selected_model.py b/src/gooey/types/seo_summary_page_request_selected_model.py
deleted file mode 100644
index 7030bfd..0000000
--- a/src/gooey/types/seo_summary_page_request_selected_model.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SeoSummaryPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "gpt_4_o",
- "gpt_4_o_mini",
- "chatgpt_4_o",
- "gpt_4_turbo_vision",
- "gpt_4_vision",
- "gpt_4_turbo",
- "gpt_4",
- "gpt_4_32k",
- "gpt_3_5_turbo",
- "gpt_3_5_turbo_16k",
- "gpt_3_5_turbo_instruct",
- "llama3_70b",
- "llama_3_groq_70b_tool_use",
- "llama3_8b",
- "llama_3_groq_8b_tool_use",
- "llama2_70b_chat",
- "mixtral_8x7b_instruct_0_1",
- "gemma_2_9b_it",
- "gemma_7b_it",
- "gemini_1_5_flash",
- "gemini_1_5_pro",
- "gemini_1_pro_vision",
- "gemini_1_pro",
- "palm2_chat",
- "palm2_text",
- "claude_3_5_sonnet",
- "claude_3_opus",
- "claude_3_sonnet",
- "claude_3_haiku",
- "sea_lion_7b_instruct",
- "llama3_8b_cpt_sea_lion_v2_instruct",
- "sarvam_2b",
- "text_davinci_003",
- "text_davinci_002",
- "code_davinci_002",
- "text_curie_001",
- "text_babbage_001",
- "text_ada_001",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/seo_summary_page_response.py b/src/gooey/types/seo_summary_page_response.py
deleted file mode 100644
index 41231e6..0000000
--- a/src/gooey/types/seo_summary_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .seo_summary_page_output import SeoSummaryPageOutput
-
-
-class SeoSummaryPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: SeoSummaryPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/smart_gpt_page_request.py b/src/gooey/types/smart_gpt_page_request.py
deleted file mode 100644
index ceedad9..0000000
--- a/src/gooey/types/smart_gpt_page_request.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
-from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
-
-
-class SmartGptPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_prompt: str
- cot_prompt: typing.Optional[str] = None
- reflexion_prompt: typing.Optional[str] = None
- dera_prompt: typing.Optional[str] = None
- selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/smart_gpt_page_request_response_format_type.py b/src/gooey/types/smart_gpt_page_request_response_format_type.py
deleted file mode 100644
index 1eaf901..0000000
--- a/src/gooey/types/smart_gpt_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SmartGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/smart_gpt_page_response.py b/src/gooey/types/smart_gpt_page_response.py
deleted file mode 100644
index 235b091..0000000
--- a/src/gooey/types/smart_gpt_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .smart_gpt_page_output import SmartGptPageOutput
-
-
-class SmartGptPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: SmartGptPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/social_lookup_email_page_request.py b/src/gooey/types/social_lookup_email_page_request.py
deleted file mode 100644
index 39bcef3..0000000
--- a/src/gooey/types/social_lookup_email_page_request.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
-from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel
-
-
-class SocialLookupEmailPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- email_address: str
- input_prompt: typing.Optional[str] = None
- selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/social_lookup_email_page_request_selected_model.py b/src/gooey/types/social_lookup_email_page_request_selected_model.py
deleted file mode 100644
index 1a0cba7..0000000
--- a/src/gooey/types/social_lookup_email_page_request_selected_model.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SocialLookupEmailPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "gpt_4_o",
- "gpt_4_o_mini",
- "chatgpt_4_o",
- "gpt_4_turbo_vision",
- "gpt_4_vision",
- "gpt_4_turbo",
- "gpt_4",
- "gpt_4_32k",
- "gpt_3_5_turbo",
- "gpt_3_5_turbo_16k",
- "gpt_3_5_turbo_instruct",
- "llama3_70b",
- "llama_3_groq_70b_tool_use",
- "llama3_8b",
- "llama_3_groq_8b_tool_use",
- "llama2_70b_chat",
- "mixtral_8x7b_instruct_0_1",
- "gemma_2_9b_it",
- "gemma_7b_it",
- "gemini_1_5_flash",
- "gemini_1_5_pro",
- "gemini_1_pro_vision",
- "gemini_1_pro",
- "palm2_chat",
- "palm2_text",
- "claude_3_5_sonnet",
- "claude_3_opus",
- "claude_3_sonnet",
- "claude_3_haiku",
- "sea_lion_7b_instruct",
- "llama3_8b_cpt_sea_lion_v2_instruct",
- "sarvam_2b",
- "text_davinci_003",
- "text_davinci_002",
- "code_davinci_002",
- "text_curie_001",
- "text_babbage_001",
- "text_ada_001",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/social_lookup_email_page_response.py b/src/gooey/types/social_lookup_email_page_response.py
deleted file mode 100644
index 825527a..0000000
--- a/src/gooey/types/social_lookup_email_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .social_lookup_email_page_output import SocialLookupEmailPageOutput
-
-
-class SocialLookupEmailPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: SocialLookupEmailPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/speech_recognition_request_output_format.py b/src/gooey/types/speech_recognition_request_output_format.py
new file mode 100644
index 0000000..4d2cf2b
--- /dev/null
+++ b/src/gooey/types/speech_recognition_request_output_format.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeechRecognitionRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any]
diff --git a/src/gooey/types/doc_extract_page_request_selected_asr_model.py b/src/gooey/types/speech_recognition_request_selected_model.py
similarity index 89%
rename from src/gooey/types/doc_extract_page_request_selected_asr_model.py
rename to src/gooey/types/speech_recognition_request_selected_model.py
index a358400..9d2d28f 100644
--- a/src/gooey/types/doc_extract_page_request_selected_asr_model.py
+++ b/src/gooey/types/speech_recognition_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-DocExtractPageRequestSelectedAsrModel = typing.Union[
+SpeechRecognitionRequestSelectedModel = typing.Union[
typing.Literal[
"whisper_large_v2",
"whisper_large_v3",
diff --git a/src/gooey/types/speech_recognition_request_translation_model.py b/src/gooey/types/speech_recognition_request_translation_model.py
new file mode 100644
index 0000000..886ab92
--- /dev/null
+++ b/src/gooey/types/speech_recognition_request_translation_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeechRecognitionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/compare_llm_page_request_response_format_type.py b/src/gooey/types/synthesize_data_request_response_format_type.py
similarity index 65%
rename from src/gooey/types/compare_llm_page_request_response_format_type.py
rename to src/gooey/types/synthesize_data_request_response_format_type.py
index a846068..3ab37a9 100644
--- a/src/gooey/types/compare_llm_page_request_response_format_type.py
+++ b/src/gooey/types/synthesize_data_request_response_format_type.py
@@ -2,4 +2,4 @@
import typing
-CompareLlmPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
+SynthesizeDataRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/doc_summary_page_request_selected_asr_model.py b/src/gooey/types/synthesize_data_request_selected_asr_model.py
similarity index 89%
rename from src/gooey/types/doc_summary_page_request_selected_asr_model.py
rename to src/gooey/types/synthesize_data_request_selected_asr_model.py
index c04cc7a..6c1bc21 100644
--- a/src/gooey/types/doc_summary_page_request_selected_asr_model.py
+++ b/src/gooey/types/synthesize_data_request_selected_asr_model.py
@@ -2,7 +2,7 @@
import typing
-DocSummaryPageRequestSelectedAsrModel = typing.Union[
+SynthesizeDataRequestSelectedAsrModel = typing.Union[
typing.Literal[
"whisper_large_v2",
"whisper_large_v3",
diff --git a/src/gooey/types/doc_search_page_request_selected_model.py b/src/gooey/types/synthesize_data_request_selected_model.py
similarity index 95%
rename from src/gooey/types/doc_search_page_request_selected_model.py
rename to src/gooey/types/synthesize_data_request_selected_model.py
index 3b793b6..42bde95 100644
--- a/src/gooey/types/doc_search_page_request_selected_model.py
+++ b/src/gooey/types/synthesize_data_request_selected_model.py
@@ -2,7 +2,7 @@
import typing
-DocSearchPageRequestSelectedModel = typing.Union[
+SynthesizeDataRequestSelectedModel = typing.Union[
typing.Literal[
"gpt_4_o",
"gpt_4_o_mini",
diff --git a/src/gooey/types/text2audio_page_request.py b/src/gooey/types/text2audio_page_request.py
deleted file mode 100644
index f549c7e..0000000
--- a/src/gooey/types/text2audio_page_request.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-
-
-class Text2AudioPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- text_prompt: str
- negative_prompt: typing.Optional[str] = None
- duration_sec: typing.Optional[float] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- seed: typing.Optional[int] = None
- sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)
- selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/text2audio_page_response.py b/src/gooey/types/text2audio_page_response.py
deleted file mode 100644
index e17927d..0000000
--- a/src/gooey/types/text2audio_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .text2audio_page_output import Text2AudioPageOutput
-
-
-class Text2AudioPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: Text2AudioPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_text2img_page_request_scheduler.py b/src/gooey/types/text_to_image_request_scheduler.py
similarity index 89%
rename from src/gooey/types/compare_text2img_page_request_scheduler.py
rename to src/gooey/types/text_to_image_request_scheduler.py
index 29ce840..4283b8c 100644
--- a/src/gooey/types/compare_text2img_page_request_scheduler.py
+++ b/src/gooey/types/text_to_image_request_scheduler.py
@@ -2,7 +2,7 @@
import typing
-CompareText2ImgPageRequestScheduler = typing.Union[
+TextToImageRequestScheduler = typing.Union[
typing.Literal[
"singlestep_dpm_solver",
"multistep_dpm_solver",
diff --git a/src/gooey/types/compare_text2img_page_request_selected_models_item.py b/src/gooey/types/text_to_image_request_selected_models_item.py
similarity index 87%
rename from src/gooey/types/compare_text2img_page_request_selected_models_item.py
rename to src/gooey/types/text_to_image_request_selected_models_item.py
index 4154491..06aef80 100644
--- a/src/gooey/types/compare_text2img_page_request_selected_models_item.py
+++ b/src/gooey/types/text_to_image_request_selected_models_item.py
@@ -2,7 +2,7 @@
import typing
-CompareText2ImgPageRequestSelectedModelsItem = typing.Union[
+TextToImageRequestSelectedModelsItem = typing.Union[
typing.Literal[
"dream_shaper",
"dreamlike_2",
diff --git a/src/gooey/types/text_to_speech_page_request.py b/src/gooey/types/text_to_speech_page_request.py
deleted file mode 100644
index bdd5d95..0000000
--- a/src/gooey/types/text_to_speech_page_request.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
-from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
-from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
-
-
-class TextToSpeechPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- text_prompt: str
- tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = None
- uberduck_voice_name: typing.Optional[str] = None
- uberduck_speaking_rate: typing.Optional[float] = None
- google_voice_name: typing.Optional[str] = None
- google_speaking_rate: typing.Optional[float] = None
- google_pitch: typing.Optional[float] = None
- bark_history_prompt: typing.Optional[str] = None
- elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None)
- """
- Use `elevenlabs_voice_id` instead
- """
-
- elevenlabs_api_key: typing.Optional[str] = None
- elevenlabs_voice_id: typing.Optional[str] = None
- elevenlabs_model: typing.Optional[str] = None
- elevenlabs_stability: typing.Optional[float] = None
- elevenlabs_similarity_boost: typing.Optional[float] = None
- elevenlabs_style: typing.Optional[float] = None
- elevenlabs_speaker_boost: typing.Optional[bool] = None
- azure_voice_name: typing.Optional[str] = None
- openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = None
- openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/text_to_speech_page_request_openai_tts_model.py b/src/gooey/types/text_to_speech_page_request_openai_tts_model.py
deleted file mode 100644
index 685dfff..0000000
--- a/src/gooey/types/text_to_speech_page_request_openai_tts_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TextToSpeechPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/text_to_speech_page_response.py b/src/gooey/types/text_to_speech_page_response.py
deleted file mode 100644
index bd00591..0000000
--- a/src/gooey/types/text_to_speech_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .text_to_speech_page_output import TextToSpeechPageOutput
-
-
-class TextToSpeechPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: TextToSpeechPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py b/src/gooey/types/text_to_speech_request_openai_tts_model.py
similarity index 64%
rename from src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py
rename to src/gooey/types/text_to_speech_request_openai_tts_model.py
index 475ca67..29e0dbe 100644
--- a/src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py
+++ b/src/gooey/types/text_to_speech_request_openai_tts_model.py
@@ -2,4 +2,4 @@
import typing
-CreateStreamRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
+TextToSpeechRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_openai_voice_name.py b/src/gooey/types/text_to_speech_request_openai_voice_name.py
similarity index 76%
rename from src/gooey/types/video_bots_page_request_openai_voice_name.py
rename to src/gooey/types/text_to_speech_request_openai_voice_name.py
index a08f96c..495482a 100644
--- a/src/gooey/types/video_bots_page_request_openai_voice_name.py
+++ b/src/gooey/types/text_to_speech_request_openai_voice_name.py
@@ -2,6 +2,6 @@
import typing
-VideoBotsPageRequestOpenaiVoiceName = typing.Union[
+TextToSpeechRequestOpenaiVoiceName = typing.Union[
typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
]
diff --git a/src/gooey/types/video_bots_page_request_tts_provider.py b/src/gooey/types/text_to_speech_request_tts_provider.py
similarity index 79%
rename from src/gooey/types/video_bots_page_request_tts_provider.py
rename to src/gooey/types/text_to_speech_request_tts_provider.py
index 3fc8d0a..ffabe23 100644
--- a/src/gooey/types/video_bots_page_request_tts_provider.py
+++ b/src/gooey/types/text_to_speech_request_tts_provider.py
@@ -2,6 +2,6 @@
import typing
-VideoBotsPageRequestTtsProvider = typing.Union[
+TextToSpeechRequestTtsProvider = typing.Union[
typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
]
diff --git a/src/gooey/types/translate_request_selected_model.py b/src/gooey/types/translate_request_selected_model.py
new file mode 100644
index 0000000..b774b56
--- /dev/null
+++ b/src/gooey/types/translate_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TranslateRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/translation_page_request.py b/src/gooey/types/translation_page_request.py
deleted file mode 100644
index 2c0f394..0000000
--- a/src/gooey/types/translation_page_request.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
-
-
-class TranslationPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- texts: typing.Optional[typing.List[str]] = None
- selected_model: typing.Optional[TranslationPageRequestSelectedModel] = None
- translation_source: typing.Optional[str] = None
- translation_target: typing.Optional[str] = None
- glossary_document: typing.Optional[str] = pydantic.Field(default=None)
- """
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
- """
-
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/translation_page_request_selected_model.py b/src/gooey/types/translation_page_request_selected_model.py
deleted file mode 100644
index 62ae9ab..0000000
--- a/src/gooey/types/translation_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TranslationPageRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/translation_page_response.py b/src/gooey/types/translation_page_response.py
deleted file mode 100644
index 00c0948..0000000
--- a/src/gooey/types/translation_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .translation_page_output import TranslationPageOutput
-
-
-class TranslationPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: TranslationPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_upscaler_page_request_selected_models_item.py b/src/gooey/types/upscale_request_selected_models_item.py
similarity index 74%
rename from src/gooey/types/compare_upscaler_page_request_selected_models_item.py
rename to src/gooey/types/upscale_request_selected_models_item.py
index eff4f6e..1a8362e 100644
--- a/src/gooey/types/compare_upscaler_page_request_selected_models_item.py
+++ b/src/gooey/types/upscale_request_selected_models_item.py
@@ -2,6 +2,6 @@
import typing
-CompareUpscalerPageRequestSelectedModelsItem = typing.Union[
+UpscaleRequestSelectedModelsItem = typing.Union[
typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any
]
diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py
deleted file mode 100644
index f6824e8..0000000
--- a/src/gooey/types/video_bots_page_request.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .conversation_entry import ConversationEntry
-from .llm_tools import LlmTools
-from .recipe_function import RecipeFunction
-from .run_settings import RunSettings
-from .sad_talker_settings import SadTalkerSettings
-from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
-from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
-from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
-from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
-from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
-from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
-from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel
-from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
-
-
-class VideoBotsPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_prompt: typing.Optional[str] = None
- input_audio: typing.Optional[str] = None
- input_images: typing.Optional[typing.List[str]] = None
- input_documents: typing.Optional[typing.List[str]] = None
- doc_extract_url: typing.Optional[str] = pydantic.Field(default=None)
- """
- Select a workflow to extract text from documents and images.
- """
-
- messages: typing.Optional[typing.List[ConversationEntry]] = None
- bot_script: typing.Optional[str] = None
- selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = None
- document_model: typing.Optional[str] = pydantic.Field(default=None)
- """
- When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
- """
-
- task_instructions: typing.Optional[str] = None
- query_instructions: typing.Optional[str] = None
- keyword_instructions: typing.Optional[str] = None
- documents: typing.Optional[typing.List[str]] = None
- max_references: typing.Optional[int] = None
- max_context_words: typing.Optional[int] = None
- scroll_jump: typing.Optional[int] = None
- embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = None
- dense_weight: typing.Optional[float] = pydantic.Field(default=None)
- """
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- """
-
- citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None
- use_url_shortener: typing.Optional[bool] = None
- asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = pydantic.Field(default=None)
- """
- Choose a model to transcribe incoming audio messages to text.
- """
-
- asr_language: typing.Optional[str] = pydantic.Field(default=None)
- """
- Choose a language to transcribe incoming audio messages to text.
- """
-
- translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = None
- user_language: typing.Optional[str] = pydantic.Field(default=None)
- """
- Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
- """
-
- input_glossary_document: typing.Optional[str] = pydantic.Field(default=None)
- """
- Translation Glossary for User Langauge -> LLM Language (English)
- """
-
- output_glossary_document: typing.Optional[str] = pydantic.Field(default=None)
- """
- Translation Glossary for LLM Language (English) -> User Langauge
- """
-
- lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = None
- tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None)
- """
- Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
- """
-
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = None
- tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = None
- uberduck_voice_name: typing.Optional[str] = None
- uberduck_speaking_rate: typing.Optional[float] = None
- google_voice_name: typing.Optional[str] = None
- google_speaking_rate: typing.Optional[float] = None
- google_pitch: typing.Optional[float] = None
- bark_history_prompt: typing.Optional[str] = None
- elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None)
- """
- Use `elevenlabs_voice_id` instead
- """
-
- elevenlabs_api_key: typing.Optional[str] = None
- elevenlabs_voice_id: typing.Optional[str] = None
- elevenlabs_model: typing.Optional[str] = None
- elevenlabs_stability: typing.Optional[float] = None
- elevenlabs_similarity_boost: typing.Optional[float] = None
- elevenlabs_style: typing.Optional[float] = None
- elevenlabs_speaker_boost: typing.Optional[bool] = None
- azure_voice_name: typing.Optional[str] = None
- openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = None
- openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = None
- input_face: typing.Optional[str] = None
- face_padding_top: typing.Optional[int] = None
- face_padding_bottom: typing.Optional[int] = None
- face_padding_left: typing.Optional[int] = None
- face_padding_right: typing.Optional[int] = None
- sadtalker_settings: typing.Optional[SadTalkerSettings] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/video_bots_page_request_embedding_model.py b/src/gooey/types/video_bots_page_request_embedding_model.py
deleted file mode 100644
index 19c8972..0000000
--- a/src/gooey/types/video_bots_page_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestEmbeddingModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/video_bots_page_request_lipsync_model.py b/src/gooey/types/video_bots_page_request_lipsync_model.py
deleted file mode 100644
index 3bb98e0..0000000
--- a/src/gooey/types/video_bots_page_request_lipsync_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_openai_tts_model.py b/src/gooey/types/video_bots_page_request_openai_tts_model.py
deleted file mode 100644
index 1df5de0..0000000
--- a/src/gooey/types/video_bots_page_request_openai_tts_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_response_format_type.py b/src/gooey/types/video_bots_page_request_response_format_type.py
deleted file mode 100644
index 25cc8f1..0000000
--- a/src/gooey/types/video_bots_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_selected_model.py b/src/gooey/types/video_bots_page_request_selected_model.py
deleted file mode 100644
index e327a7d..0000000
--- a/src/gooey/types/video_bots_page_request_selected_model.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "gpt_4_o",
- "gpt_4_o_mini",
- "chatgpt_4_o",
- "gpt_4_turbo_vision",
- "gpt_4_vision",
- "gpt_4_turbo",
- "gpt_4",
- "gpt_4_32k",
- "gpt_3_5_turbo",
- "gpt_3_5_turbo_16k",
- "gpt_3_5_turbo_instruct",
- "llama3_70b",
- "llama_3_groq_70b_tool_use",
- "llama3_8b",
- "llama_3_groq_8b_tool_use",
- "llama2_70b_chat",
- "mixtral_8x7b_instruct_0_1",
- "gemma_2_9b_it",
- "gemma_7b_it",
- "gemini_1_5_flash",
- "gemini_1_5_pro",
- "gemini_1_pro_vision",
- "gemini_1_pro",
- "palm2_chat",
- "palm2_text",
- "claude_3_5_sonnet",
- "claude_3_opus",
- "claude_3_sonnet",
- "claude_3_haiku",
- "sea_lion_7b_instruct",
- "llama3_8b_cpt_sea_lion_v2_instruct",
- "sarvam_2b",
- "text_davinci_003",
- "text_davinci_002",
- "code_davinci_002",
- "text_curie_001",
- "text_babbage_001",
- "text_ada_001",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/video_bots_page_request_translation_model.py b/src/gooey/types/video_bots_page_request_translation_model.py
deleted file mode 100644
index 0373c0c..0000000
--- a/src/gooey/types/video_bots_page_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_response.py b/src/gooey/types/video_bots_page_response.py
deleted file mode 100644
index b6726fd..0000000
--- a/src/gooey/types/video_bots_page_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .video_bots_page_output import VideoBotsPageOutput
-
-
-class VideoBotsPageResponse(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- Unique ID for this run
- """
-
- url: str = pydantic.Field()
- """
- Web URL for this run
- """
-
- created_at: str = pydantic.Field()
- """
- Time when the run was created as ISO format
- """
-
- output: VideoBotsPageOutput = pydantic.Field()
- """
- Output of the run
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/embeddings_page_request_selected_model.py b/src/gooey/types/web_search_llm_request_embedding_model.py
similarity index 87%
rename from src/gooey/types/embeddings_page_request_selected_model.py
rename to src/gooey/types/web_search_llm_request_embedding_model.py
index a03ecc8..1e9a6c5 100644
--- a/src/gooey/types/embeddings_page_request_selected_model.py
+++ b/src/gooey/types/web_search_llm_request_embedding_model.py
@@ -2,7 +2,7 @@
import typing
-EmbeddingsPageRequestSelectedModel = typing.Union[
+WebSearchLlmRequestEmbeddingModel = typing.Union[
typing.Literal[
"openai_3_large",
"openai_3_small",
diff --git a/src/gooey/types/bulk_eval_page_request_response_format_type.py b/src/gooey/types/web_search_llm_request_response_format_type.py
similarity index 65%
rename from src/gooey/types/bulk_eval_page_request_response_format_type.py
rename to src/gooey/types/web_search_llm_request_response_format_type.py
index f1c242f..4989a6b 100644
--- a/src/gooey/types/bulk_eval_page_request_response_format_type.py
+++ b/src/gooey/types/web_search_llm_request_response_format_type.py
@@ -2,4 +2,4 @@
import typing
-BulkEvalPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
+WebSearchLlmRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/web_search_llm_request_selected_model.py b/src/gooey/types/web_search_llm_request_selected_model.py
new file mode 100644
index 0000000..d43a330
--- /dev/null
+++ b/src/gooey/types/web_search_llm_request_selected_model.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WebSearchLlmRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_o_mini",
+ "chatgpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama_3_groq_70b_tool_use",
+ "llama3_8b",
+ "llama_3_groq_8b_tool_use",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_2_9b_it",
+ "gemma_7b_it",
+ "gemini_1_5_flash",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "llama3_8b_cpt_sea_lion_v2_instruct",
+ "sarvam_2b",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/web_search_gpt3/__init__.py b/src/gooey/web_search_gpt3/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/web_search_gpt3/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/web_search_gpt3/client.py b/src/gooey/web_search_gpt3/client.py
new file mode 100644
index 0000000..5b2b824
--- /dev/null
+++ b/src/gooey/web_search_gpt3/client.py
@@ -0,0 +1,133 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.google_gpt_page_status_response import GoogleGptPageStatusResponse
+from ..types.http_validation_error import HttpValidationError
+
+
+class WebSearchGpt3Client:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def status_google_gpt(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleGptPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GoogleGptPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey import Gooey
+
+ client = Gooey(
+ api_key="YOUR_API_KEY",
+ )
+ client.web_search_gpt3.status_google_gpt(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/google-gpt/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(GoogleGptPageStatusResponse, parse_obj_as(type_=GoogleGptPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncWebSearchGpt3Client:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def status_google_gpt(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleGptPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GoogleGptPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey import AsyncGooey
+
+ client = AsyncGooey(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.web_search_gpt3.status_google_gpt(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/google-gpt/status", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(GoogleGptPageStatusResponse, parse_obj_as(type_=GoogleGptPageStatusResponse, object_=_response.json())) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(
+ typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)