From 4904bd41da3d7ce3e8aaed91784483270c3cac99 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Mon, 22 Jul 2024 19:21:23 +0000
Subject: [PATCH] SDK regeneration
---
.fernignore | 1 +
.github/workflows/ci.yml | 61 +
.gitignore | 4 +
LICENSE | 201 -
README.md | 131 +
poetry.lock | 471 +
pyproject.toml | 56 +
reference.md | 14921 ++++++++++++++++
src/gooey/__init__.py | 614 +
src/gooey/ai_animation_generator/__init__.py | 2 +
src/gooey/ai_animation_generator/client.py | 644 +
src/gooey/ai_art_qr_code/__init__.py | 2 +
src/gooey/ai_art_qr_code/client.py | 867 +
src/gooey/ai_background_changer/__init__.py | 2 +
src/gooey/ai_background_changer/client.py | 555 +
.../__init__.py | 2 +
.../client.py | 799 +
src/gooey/ai_image_with_a_face/__init__.py | 2 +
src/gooey/ai_image_with_a_face/client.py | 655 +
src/gooey/bulk_runner/__init__.py | 2 +
src/gooey/bulk_runner/client.py | 550 +
src/gooey/chyron_plant_bot/__init__.py | 2 +
src/gooey/chyron_plant_bot/client.py | 474 +
src/gooey/client.py | 303 +
.../compare_ai_image_generators/__init__.py | 2 +
.../compare_ai_image_generators/client.py | 668 +
.../compare_ai_image_upscalers/__init__.py | 2 +
.../compare_ai_image_upscalers/client.py | 519 +
src/gooey/compare_ai_translations/__init__.py | 2 +
src/gooey/compare_ai_translations/client.py | 507 +
.../compare_ai_voice_generators/__init__.py | 2 +
.../compare_ai_voice_generators/client.py | 737 +
.../copilot_for_your_enterprise/__init__.py | 2 +
.../copilot_for_your_enterprise/client.py | 1386 ++
src/gooey/copilot_integrations/__init__.py | 27 +
src/gooey/copilot_integrations/client.py | 828 +
.../copilot_integrations/types/__init__.py | 25 +
.../types/create_stream_request_asr_model.py | 22 +
.../create_stream_request_citation_style.py | 25 +
.../create_stream_request_embedding_model.py | 18 +
.../create_stream_request_lipsync_model.py | 5 +
.../create_stream_request_openai_tts_model.py | 5 +
...create_stream_request_openai_voice_name.py | 7 +
.../create_stream_request_selected_model.py | 39 +
...create_stream_request_translation_model.py | 5 +
.../create_stream_request_tts_provider.py | 7 +
.../types/video_bots_stream_response.py | 11 +
src/gooey/core/__init__.py | 30 +
src/gooey/core/api_error.py | 15 +
src/gooey/core/client_wrapper.py | 68 +
src/gooey/core/datetime_utils.py | 28 +
src/gooey/core/file.py | 38 +
src/gooey/core/http_client.py | 475 +
src/gooey/core/jsonable_encoder.py | 102 +
src/gooey/core/pydantic_utilities.py | 28 +
src/gooey/core/query_encoder.py | 33 +
src/gooey/core/remove_none_from_dict.py | 11 +
src/gooey/core/request_options.py | 32 +
.../__init__.py | 2 +
.../client.py | 716 +
.../edit_an_image_with_ai_prompt/__init__.py | 2 +
.../edit_an_image_with_ai_prompt/client.py | 652 +
src/gooey/embeddings/__init__.py | 2 +
src/gooey/embeddings/client.py | 459 +
src/gooey/environment.py | 7 +
src/gooey/errors/__init__.py | 8 +
src/gooey/errors/internal_server_error.py | 9 +
src/gooey/errors/payment_required_error.py | 10 +
src/gooey/errors/too_many_requests_error.py | 9 +
.../errors/unprocessable_entity_error.py | 9 +
src/gooey/evaluator/__init__.py | 2 +
src/gooey/evaluator/client.py | 601 +
src/gooey/functions/__init__.py | 2 +
src/gooey/functions/client.py | 405 +
.../__init__.py | 2 +
.../client.py | 770 +
.../__init__.py | 2 +
.../client.py | 671 +
.../large_language_models_gpt3/__init__.py | 2 +
.../large_language_models_gpt3/client.py | 548 +
src/gooey/letter_writer/__init__.py | 2 +
src/gooey/letter_writer/client.py | 651 +
src/gooey/lip_syncing/__init__.py | 2 +
src/gooey/lip_syncing/client.py | 548 +
.../lipsync_video_with_any_text/__init__.py | 2 +
.../lipsync_video_with_any_text/client.py | 851 +
src/gooey/misc/__init__.py | 2 +
src/gooey/misc/client.py | 351 +
.../__init__.py | 2 +
.../client.py | 800 +
.../__init__.py | 2 +
.../client.py | 555 +
src/gooey/py.typed | 0
.../__init__.py | 2 +
.../client.py | 644 +
.../search_your_docs_with_gpt/__init__.py | 2 +
src/gooey/search_your_docs_with_gpt/client.py | 726 +
src/gooey/smart_gpt/__init__.py | 2 +
src/gooey/smart_gpt/client.py | 587 +
.../__init__.py | 2 +
.../speech_recognition_translation/client.py | 585 +
.../summarize_your_docs_with_gpt/__init__.py | 2 +
.../summarize_your_docs_with_gpt/client.py | 620 +
.../__init__.py | 2 +
.../client.py | 628 +
.../text_guided_audio_generator/__init__.py | 2 +
.../text_guided_audio_generator/client.py | 570 +
src/gooey/types/__init__.py | 515 +
src/gooey/types/agg_function.py | 31 +
src/gooey/types/agg_function_result.py | 33 +
src/gooey/types/animation_prompt.py | 30 +
src/gooey/types/asr_chunk.py | 31 +
src/gooey/types/asr_output_json.py | 31 +
src/gooey/types/asr_page_output.py | 33 +
.../types/asr_page_output_output_text_item.py | 7 +
src/gooey/types/asr_page_request.py | 58 +
.../types/asr_page_request_output_format.py | 5 +
.../types/asr_page_request_selected_model.py | 22 +
.../asr_page_request_translation_model.py | 5 +
src/gooey/types/asr_page_response.py | 48 +
src/gooey/types/asr_page_status_response.py | 64 +
.../types/async_api_response_model_v3.py | 47 +
src/gooey/types/balance_response.py | 32 +
src/gooey/types/bot_broadcast_filters.py | 51 +
src/gooey/types/bulk_eval_page_output.py | 34 +
src/gooey/types/bulk_eval_page_request.py | 64 +
.../bulk_eval_page_request_selected_model.py | 39 +
src/gooey/types/bulk_eval_page_response.py | 48 +
.../types/bulk_eval_page_status_response.py | 64 +
src/gooey/types/bulk_runner_page_output.py | 36 +
src/gooey/types/bulk_runner_page_request.py | 65 +
src/gooey/types/bulk_runner_page_response.py | 48 +
.../types/bulk_runner_page_status_response.py | 64 +
src/gooey/types/button_pressed.py | 37 +
src/gooey/types/called_function_response.py | 32 +
...hat_completion_content_part_image_param.py | 30 +
...chat_completion_content_part_text_param.py | 29 +
src/gooey/types/chyron_plant_page_output.py | 32 +
src/gooey/types/chyron_plant_page_request.py | 40 +
src/gooey/types/chyron_plant_page_response.py | 48 +
.../chyron_plant_page_status_response.py | 64 +
src/gooey/types/compare_llm_page_output.py | 31 +
src/gooey/types/compare_llm_page_request.py | 47 +
...e_llm_page_request_response_format_type.py | 5 +
...e_llm_page_request_selected_models_item.py | 39 +
src/gooey/types/compare_llm_page_response.py | 48 +
.../types/compare_llm_page_status_response.py | 64 +
.../types/compare_text2img_page_output.py | 31 +
.../types/compare_text2img_page_request.py | 56 +
...t2img_page_request_selected_models_item.py | 22 +
.../types/compare_text2img_page_response.py | 48 +
.../compare_text2img_page_status_response.py | 64 +
.../types/compare_upscaler_page_output.py | 40 +
.../types/compare_upscaler_page_request.py | 55 +
...caler_page_request_selected_models_item.py | 7 +
.../types/compare_upscaler_page_response.py | 48 +
.../compare_upscaler_page_status_response.py | 64 +
src/gooey/types/console_logs.py | 31 +
src/gooey/types/content.py | 7 +
src/gooey/types/conversation_entry.py | 33 +
.../types/conversation_entry_content_item.py | 59 +
src/gooey/types/conversation_start.py | 57 +
src/gooey/types/create_stream_response.py | 32 +
src/gooey/types/deforum_sd_page_output.py | 31 +
src/gooey/types/deforum_sd_page_request.py | 53 +
.../deforum_sd_page_request_selected_model.py | 5 +
src/gooey/types/deforum_sd_page_response.py | 48 +
.../types/deforum_sd_page_status_response.py | 64 +
src/gooey/types/detail.py | 5 +
src/gooey/types/doc_extract_page_output.py | 30 +
src/gooey/types/doc_extract_page_request.py | 56 +
...extract_page_request_selected_asr_model.py | 22 +
...doc_extract_page_request_selected_model.py | 39 +
src/gooey/types/doc_extract_page_response.py | 48 +
.../types/doc_extract_page_status_response.py | 64 +
src/gooey/types/doc_search_page_output.py | 35 +
src/gooey/types/doc_search_page_request.py | 64 +
.../doc_search_page_request_citation_style.py | 25 +
...doc_search_page_request_embedding_model.py | 18 +
.../doc_search_page_request_keyword_query.py | 5 +
.../doc_search_page_request_selected_model.py | 39 +
src/gooey/types/doc_search_page_response.py | 48 +
.../types/doc_search_page_status_response.py | 64 +
src/gooey/types/doc_summary_page_output.py | 34 +
src/gooey/types/doc_summary_page_request.py | 51 +
...summary_page_request_selected_asr_model.py | 22 +
...doc_summary_page_request_selected_model.py | 39 +
src/gooey/types/doc_summary_page_response.py | 48 +
.../types/doc_summary_page_status_response.py | 64 +
.../email_face_inpainting_page_output.py | 36 +
.../email_face_inpainting_page_request.py | 61 +
..._inpainting_page_request_selected_model.py | 7 +
.../email_face_inpainting_page_response.py | 48 +
...il_face_inpainting_page_status_response.py | 64 +
src/gooey/types/embeddings_page_output.py | 31 +
src/gooey/types/embeddings_page_request.py | 40 +
.../embeddings_page_request_selected_model.py | 18 +
src/gooey/types/embeddings_page_response.py | 48 +
.../types/embeddings_page_status_response.py | 64 +
src/gooey/types/eval_prompt.py | 30 +
.../types/face_inpainting_page_output.py | 34 +
.../types/face_inpainting_page_request.py | 52 +
..._inpainting_page_request_selected_model.py | 7 +
.../types/face_inpainting_page_response.py | 48 +
.../face_inpainting_page_status_response.py | 64 +
src/gooey/types/failed_reponse_model_v2.py | 30 +
src/gooey/types/failed_response_detail.py | 47 +
src/gooey/types/final_response.py | 69 +
src/gooey/types/function.py | 25 +
src/gooey/types/functions_page_output.py | 42 +
src/gooey/types/functions_page_request.py | 40 +
src/gooey/types/functions_page_response.py | 48 +
.../types/functions_page_status_response.py | 64 +
src/gooey/types/generic_error_response.py | 30 +
.../types/generic_error_response_detail.py | 29 +
src/gooey/types/google_gpt_page_output.py | 36 +
src/gooey/types/google_gpt_page_request.py | 74 +
...google_gpt_page_request_embedding_model.py | 18 +
.../google_gpt_page_request_selected_model.py | 39 +
src/gooey/types/google_gpt_page_response.py | 48 +
.../types/google_gpt_page_status_response.py | 64 +
.../types/google_image_gen_page_output.py | 33 +
.../types/google_image_gen_page_request.py | 58 +
...e_image_gen_page_request_selected_model.py | 21 +
.../types/google_image_gen_page_response.py | 48 +
.../google_image_gen_page_status_response.py | 64 +
src/gooey/types/http_validation_error.py | 30 +
.../types/image_segmentation_page_output.py | 34 +
.../types/image_segmentation_page_request.py | 46 +
...egmentation_page_request_selected_model.py | 5 +
.../types/image_segmentation_page_response.py | 48 +
...image_segmentation_page_status_response.py | 64 +
src/gooey/types/image_url.py | 31 +
src/gooey/types/img2img_page_output.py | 31 +
src/gooey/types/img2img_page_request.py | 53 +
..._page_request_selected_controlnet_model.py | 20 +
..._request_selected_controlnet_model_item.py | 20 +
.../img2img_page_request_selected_model.py | 21 +
src/gooey/types/img2img_page_response.py | 48 +
.../types/img2img_page_status_response.py | 64 +
src/gooey/types/letter_writer_page_output.py | 34 +
src/gooey/types/letter_writer_page_request.py | 54 +
.../types/letter_writer_page_response.py | 48 +
.../letter_writer_page_status_response.py | 64 +
src/gooey/types/level.py | 5 +
src/gooey/types/lipsync_page_output.py | 31 +
src/gooey/types/lipsync_page_request.py | 47 +
.../lipsync_page_request_selected_model.py | 5 +
src/gooey/types/lipsync_page_response.py | 48 +
.../types/lipsync_page_status_response.py | 64 +
src/gooey/types/lipsync_tts_page_output.py | 32 +
src/gooey/types/lipsync_tts_page_request.py | 72 +
...psync_tts_page_request_openai_tts_model.py | 5 +
...sync_tts_page_request_openai_voice_name.py | 7 +
...lipsync_tts_page_request_selected_model.py | 5 +
.../lipsync_tts_page_request_tts_provider.py | 7 +
src/gooey/types/lipsync_tts_page_response.py | 48 +
.../types/lipsync_tts_page_status_response.py | 64 +
src/gooey/types/llm_tools.py | 5 +
src/gooey/types/message_part.py | 49 +
.../types/object_inpainting_page_output.py | 33 +
.../types/object_inpainting_page_request.py | 55 +
..._inpainting_page_request_selected_model.py | 7 +
.../types/object_inpainting_page_response.py | 48 +
.../object_inpainting_page_status_response.py | 64 +
src/gooey/types/preprocess.py | 5 +
src/gooey/types/prompt.py | 7 +
src/gooey/types/prompt_tree_node.py | 36 +
.../types/qr_code_generator_page_output.py | 34 +
.../types/qr_code_generator_page_request.py | 76 +
...est_image_prompt_controlnet_models_item.py | 20 +
..._request_selected_controlnet_model_item.py | 20 +
...e_generator_page_request_selected_model.py | 22 +
.../types/qr_code_generator_page_response.py | 48 +
.../qr_code_generator_page_status_response.py | 64 +
src/gooey/types/recipe_function.py | 38 +
src/gooey/types/recipe_run_state.py | 5 +
.../types/related_doc_search_response.py | 34 +
.../types/related_google_gpt_response.py | 35 +
.../types/related_qn_a_doc_page_output.py | 33 +
.../types/related_qn_a_doc_page_request.py | 78 +
...ed_qn_a_doc_page_request_citation_style.py | 25 +
...d_qn_a_doc_page_request_embedding_model.py | 18 +
...ted_qn_a_doc_page_request_keyword_query.py | 5 +
...ed_qn_a_doc_page_request_selected_model.py | 39 +
.../types/related_qn_a_doc_page_response.py | 48 +
.../related_qn_a_doc_page_status_response.py | 64 +
src/gooey/types/related_qn_a_page_output.py | 33 +
src/gooey/types/related_qn_a_page_request.py | 74 +
...lated_qn_a_page_request_embedding_model.py | 18 +
...elated_qn_a_page_request_selected_model.py | 39 +
src/gooey/types/related_qn_a_page_response.py | 48 +
.../related_qn_a_page_status_response.py | 64 +
src/gooey/types/reply_button.py | 30 +
src/gooey/types/response_model.py | 45 +
.../response_model_final_keyword_query.py | 5 +
.../types/response_model_final_prompt.py | 7 +
src/gooey/types/role.py | 5 +
src/gooey/types/run_settings.py | 33 +
.../types/run_settings_retention_policy.py | 5 +
src/gooey/types/run_start.py | 52 +
src/gooey/types/sad_talker_settings.py | 58 +
src/gooey/types/scheduler.py | 23 +
src/gooey/types/search_reference.py | 32 +
src/gooey/types/seo_summary_page_output.py | 35 +
src/gooey/types/seo_summary_page_request.py | 60 +
...seo_summary_page_request_selected_model.py | 39 +
src/gooey/types/seo_summary_page_response.py | 48 +
.../types/seo_summary_page_status_response.py | 64 +
src/gooey/types/serp_search_location.py | 248 +
src/gooey/types/serp_search_type.py | 5 +
src/gooey/types/smart_gpt_page_output.py | 33 +
src/gooey/types/smart_gpt_page_request.py | 48 +
.../smart_gpt_page_request_selected_model.py | 39 +
src/gooey/types/smart_gpt_page_response.py | 48 +
.../types/smart_gpt_page_status_response.py | 64 +
.../types/social_lookup_email_page_output.py | 33 +
.../types/social_lookup_email_page_request.py | 46 +
...ookup_email_page_request_selected_model.py | 39 +
.../social_lookup_email_page_response.py | 48 +
...ocial_lookup_email_page_status_response.py | 64 +
src/gooey/types/stream_error.py | 37 +
src/gooey/types/text2audio_page_output.py | 31 +
src/gooey/types/text2audio_page_request.py | 48 +
src/gooey/types/text2audio_page_response.py | 48 +
.../types/text2audio_page_status_response.py | 64 +
src/gooey/types/text_to_speech_page_output.py | 31 +
.../types/text_to_speech_page_request.py | 63 +
...to_speech_page_request_openai_tts_model.py | 5 +
...o_speech_page_request_openai_voice_name.py | 7 +
...ext_to_speech_page_request_tts_provider.py | 7 +
.../types/text_to_speech_page_response.py | 48 +
.../text_to_speech_page_status_response.py | 64 +
src/gooey/types/training_data_model.py | 30 +
src/gooey/types/translation_page_output.py | 31 +
src/gooey/types/translation_page_request.py | 48 +
...translation_page_request_selected_model.py | 5 +
src/gooey/types/translation_page_response.py | 48 +
.../types/translation_page_status_response.py | 64 +
src/gooey/types/trigger.py | 5 +
src/gooey/types/validation_error.py | 32 +
src/gooey/types/validation_error_loc_item.py | 5 +
src/gooey/types/vcard.py | 54 +
src/gooey/types/video_bots_page_output.py | 47 +
...eo_bots_page_output_final_keyword_query.py | 5 +
.../video_bots_page_output_final_prompt.py | 7 +
src/gooey/types/video_bots_page_request.py | 147 +
.../video_bots_page_request_asr_model.py | 22 +
.../video_bots_page_request_citation_style.py | 25 +
...video_bots_page_request_embedding_model.py | 18 +
.../video_bots_page_request_lipsync_model.py | 5 +
...ideo_bots_page_request_openai_tts_model.py | 5 +
...deo_bots_page_request_openai_voice_name.py | 7 +
.../video_bots_page_request_selected_model.py | 39 +
...deo_bots_page_request_translation_model.py | 5 +
.../video_bots_page_request_tts_provider.py | 7 +
src/gooey/types/video_bots_page_response.py | 48 +
.../types/video_bots_page_status_response.py | 64 +
src/gooey/version.py | 4 +
src/gooey/web_search_gpt3/__init__.py | 2 +
src/gooey/web_search_gpt3/client.py | 770 +
tests/custom/test_client.py | 6 +
tests/utils/test_http_client.py | 47 +
tests/utils/test_query_encoding.py | 13 +
364 files changed, 50938 insertions(+), 201 deletions(-)
create mode 100644 .fernignore
create mode 100644 .github/workflows/ci.yml
create mode 100644 .gitignore
delete mode 100644 LICENSE
create mode 100644 README.md
create mode 100644 poetry.lock
create mode 100644 pyproject.toml
create mode 100644 reference.md
create mode 100644 src/gooey/__init__.py
create mode 100644 src/gooey/ai_animation_generator/__init__.py
create mode 100644 src/gooey/ai_animation_generator/client.py
create mode 100644 src/gooey/ai_art_qr_code/__init__.py
create mode 100644 src/gooey/ai_art_qr_code/client.py
create mode 100644 src/gooey/ai_background_changer/__init__.py
create mode 100644 src/gooey/ai_background_changer/client.py
create mode 100644 src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py
create mode 100644 src/gooey/ai_generated_photo_from_email_profile_lookup/client.py
create mode 100644 src/gooey/ai_image_with_a_face/__init__.py
create mode 100644 src/gooey/ai_image_with_a_face/client.py
create mode 100644 src/gooey/bulk_runner/__init__.py
create mode 100644 src/gooey/bulk_runner/client.py
create mode 100644 src/gooey/chyron_plant_bot/__init__.py
create mode 100644 src/gooey/chyron_plant_bot/client.py
create mode 100644 src/gooey/client.py
create mode 100644 src/gooey/compare_ai_image_generators/__init__.py
create mode 100644 src/gooey/compare_ai_image_generators/client.py
create mode 100644 src/gooey/compare_ai_image_upscalers/__init__.py
create mode 100644 src/gooey/compare_ai_image_upscalers/client.py
create mode 100644 src/gooey/compare_ai_translations/__init__.py
create mode 100644 src/gooey/compare_ai_translations/client.py
create mode 100644 src/gooey/compare_ai_voice_generators/__init__.py
create mode 100644 src/gooey/compare_ai_voice_generators/client.py
create mode 100644 src/gooey/copilot_for_your_enterprise/__init__.py
create mode 100644 src/gooey/copilot_for_your_enterprise/client.py
create mode 100644 src/gooey/copilot_integrations/__init__.py
create mode 100644 src/gooey/copilot_integrations/client.py
create mode 100644 src/gooey/copilot_integrations/types/__init__.py
create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_asr_model.py
create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_citation_style.py
create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py
create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py
create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py
create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py
create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_selected_model.py
create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_translation_model.py
create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py
create mode 100644 src/gooey/copilot_integrations/types/video_bots_stream_response.py
create mode 100644 src/gooey/core/__init__.py
create mode 100644 src/gooey/core/api_error.py
create mode 100644 src/gooey/core/client_wrapper.py
create mode 100644 src/gooey/core/datetime_utils.py
create mode 100644 src/gooey/core/file.py
create mode 100644 src/gooey/core/http_client.py
create mode 100644 src/gooey/core/jsonable_encoder.py
create mode 100644 src/gooey/core/pydantic_utilities.py
create mode 100644 src/gooey/core/query_encoder.py
create mode 100644 src/gooey/core/remove_none_from_dict.py
create mode 100644 src/gooey/core/request_options.py
create mode 100644 src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py
create mode 100644 src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py
create mode 100644 src/gooey/edit_an_image_with_ai_prompt/__init__.py
create mode 100644 src/gooey/edit_an_image_with_ai_prompt/client.py
create mode 100644 src/gooey/embeddings/__init__.py
create mode 100644 src/gooey/embeddings/client.py
create mode 100644 src/gooey/environment.py
create mode 100644 src/gooey/errors/__init__.py
create mode 100644 src/gooey/errors/internal_server_error.py
create mode 100644 src/gooey/errors/payment_required_error.py
create mode 100644 src/gooey/errors/too_many_requests_error.py
create mode 100644 src/gooey/errors/unprocessable_entity_error.py
create mode 100644 src/gooey/evaluator/__init__.py
create mode 100644 src/gooey/evaluator/client.py
create mode 100644 src/gooey/functions/__init__.py
create mode 100644 src/gooey/functions/client.py
create mode 100644 src/gooey/generate_people_also_ask_seo_content/__init__.py
create mode 100644 src/gooey/generate_people_also_ask_seo_content/client.py
create mode 100644 src/gooey/generate_product_photo_backgrounds/__init__.py
create mode 100644 src/gooey/generate_product_photo_backgrounds/client.py
create mode 100644 src/gooey/large_language_models_gpt3/__init__.py
create mode 100644 src/gooey/large_language_models_gpt3/client.py
create mode 100644 src/gooey/letter_writer/__init__.py
create mode 100644 src/gooey/letter_writer/client.py
create mode 100644 src/gooey/lip_syncing/__init__.py
create mode 100644 src/gooey/lip_syncing/client.py
create mode 100644 src/gooey/lipsync_video_with_any_text/__init__.py
create mode 100644 src/gooey/lipsync_video_with_any_text/client.py
create mode 100644 src/gooey/misc/__init__.py
create mode 100644 src/gooey/misc/client.py
create mode 100644 src/gooey/people_also_ask_answers_from_a_doc/__init__.py
create mode 100644 src/gooey/people_also_ask_answers_from_a_doc/client.py
create mode 100644 src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py
create mode 100644 src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py
create mode 100644 src/gooey/py.typed
create mode 100644 src/gooey/render_image_search_results_with_ai/__init__.py
create mode 100644 src/gooey/render_image_search_results_with_ai/client.py
create mode 100644 src/gooey/search_your_docs_with_gpt/__init__.py
create mode 100644 src/gooey/search_your_docs_with_gpt/client.py
create mode 100644 src/gooey/smart_gpt/__init__.py
create mode 100644 src/gooey/smart_gpt/client.py
create mode 100644 src/gooey/speech_recognition_translation/__init__.py
create mode 100644 src/gooey/speech_recognition_translation/client.py
create mode 100644 src/gooey/summarize_your_docs_with_gpt/__init__.py
create mode 100644 src/gooey/summarize_your_docs_with_gpt/client.py
create mode 100644 src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py
create mode 100644 src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py
create mode 100644 src/gooey/text_guided_audio_generator/__init__.py
create mode 100644 src/gooey/text_guided_audio_generator/client.py
create mode 100644 src/gooey/types/__init__.py
create mode 100644 src/gooey/types/agg_function.py
create mode 100644 src/gooey/types/agg_function_result.py
create mode 100644 src/gooey/types/animation_prompt.py
create mode 100644 src/gooey/types/asr_chunk.py
create mode 100644 src/gooey/types/asr_output_json.py
create mode 100644 src/gooey/types/asr_page_output.py
create mode 100644 src/gooey/types/asr_page_output_output_text_item.py
create mode 100644 src/gooey/types/asr_page_request.py
create mode 100644 src/gooey/types/asr_page_request_output_format.py
create mode 100644 src/gooey/types/asr_page_request_selected_model.py
create mode 100644 src/gooey/types/asr_page_request_translation_model.py
create mode 100644 src/gooey/types/asr_page_response.py
create mode 100644 src/gooey/types/asr_page_status_response.py
create mode 100644 src/gooey/types/async_api_response_model_v3.py
create mode 100644 src/gooey/types/balance_response.py
create mode 100644 src/gooey/types/bot_broadcast_filters.py
create mode 100644 src/gooey/types/bulk_eval_page_output.py
create mode 100644 src/gooey/types/bulk_eval_page_request.py
create mode 100644 src/gooey/types/bulk_eval_page_request_selected_model.py
create mode 100644 src/gooey/types/bulk_eval_page_response.py
create mode 100644 src/gooey/types/bulk_eval_page_status_response.py
create mode 100644 src/gooey/types/bulk_runner_page_output.py
create mode 100644 src/gooey/types/bulk_runner_page_request.py
create mode 100644 src/gooey/types/bulk_runner_page_response.py
create mode 100644 src/gooey/types/bulk_runner_page_status_response.py
create mode 100644 src/gooey/types/button_pressed.py
create mode 100644 src/gooey/types/called_function_response.py
create mode 100644 src/gooey/types/chat_completion_content_part_image_param.py
create mode 100644 src/gooey/types/chat_completion_content_part_text_param.py
create mode 100644 src/gooey/types/chyron_plant_page_output.py
create mode 100644 src/gooey/types/chyron_plant_page_request.py
create mode 100644 src/gooey/types/chyron_plant_page_response.py
create mode 100644 src/gooey/types/chyron_plant_page_status_response.py
create mode 100644 src/gooey/types/compare_llm_page_output.py
create mode 100644 src/gooey/types/compare_llm_page_request.py
create mode 100644 src/gooey/types/compare_llm_page_request_response_format_type.py
create mode 100644 src/gooey/types/compare_llm_page_request_selected_models_item.py
create mode 100644 src/gooey/types/compare_llm_page_response.py
create mode 100644 src/gooey/types/compare_llm_page_status_response.py
create mode 100644 src/gooey/types/compare_text2img_page_output.py
create mode 100644 src/gooey/types/compare_text2img_page_request.py
create mode 100644 src/gooey/types/compare_text2img_page_request_selected_models_item.py
create mode 100644 src/gooey/types/compare_text2img_page_response.py
create mode 100644 src/gooey/types/compare_text2img_page_status_response.py
create mode 100644 src/gooey/types/compare_upscaler_page_output.py
create mode 100644 src/gooey/types/compare_upscaler_page_request.py
create mode 100644 src/gooey/types/compare_upscaler_page_request_selected_models_item.py
create mode 100644 src/gooey/types/compare_upscaler_page_response.py
create mode 100644 src/gooey/types/compare_upscaler_page_status_response.py
create mode 100644 src/gooey/types/console_logs.py
create mode 100644 src/gooey/types/content.py
create mode 100644 src/gooey/types/conversation_entry.py
create mode 100644 src/gooey/types/conversation_entry_content_item.py
create mode 100644 src/gooey/types/conversation_start.py
create mode 100644 src/gooey/types/create_stream_response.py
create mode 100644 src/gooey/types/deforum_sd_page_output.py
create mode 100644 src/gooey/types/deforum_sd_page_request.py
create mode 100644 src/gooey/types/deforum_sd_page_request_selected_model.py
create mode 100644 src/gooey/types/deforum_sd_page_response.py
create mode 100644 src/gooey/types/deforum_sd_page_status_response.py
create mode 100644 src/gooey/types/detail.py
create mode 100644 src/gooey/types/doc_extract_page_output.py
create mode 100644 src/gooey/types/doc_extract_page_request.py
create mode 100644 src/gooey/types/doc_extract_page_request_selected_asr_model.py
create mode 100644 src/gooey/types/doc_extract_page_request_selected_model.py
create mode 100644 src/gooey/types/doc_extract_page_response.py
create mode 100644 src/gooey/types/doc_extract_page_status_response.py
create mode 100644 src/gooey/types/doc_search_page_output.py
create mode 100644 src/gooey/types/doc_search_page_request.py
create mode 100644 src/gooey/types/doc_search_page_request_citation_style.py
create mode 100644 src/gooey/types/doc_search_page_request_embedding_model.py
create mode 100644 src/gooey/types/doc_search_page_request_keyword_query.py
create mode 100644 src/gooey/types/doc_search_page_request_selected_model.py
create mode 100644 src/gooey/types/doc_search_page_response.py
create mode 100644 src/gooey/types/doc_search_page_status_response.py
create mode 100644 src/gooey/types/doc_summary_page_output.py
create mode 100644 src/gooey/types/doc_summary_page_request.py
create mode 100644 src/gooey/types/doc_summary_page_request_selected_asr_model.py
create mode 100644 src/gooey/types/doc_summary_page_request_selected_model.py
create mode 100644 src/gooey/types/doc_summary_page_response.py
create mode 100644 src/gooey/types/doc_summary_page_status_response.py
create mode 100644 src/gooey/types/email_face_inpainting_page_output.py
create mode 100644 src/gooey/types/email_face_inpainting_page_request.py
create mode 100644 src/gooey/types/email_face_inpainting_page_request_selected_model.py
create mode 100644 src/gooey/types/email_face_inpainting_page_response.py
create mode 100644 src/gooey/types/email_face_inpainting_page_status_response.py
create mode 100644 src/gooey/types/embeddings_page_output.py
create mode 100644 src/gooey/types/embeddings_page_request.py
create mode 100644 src/gooey/types/embeddings_page_request_selected_model.py
create mode 100644 src/gooey/types/embeddings_page_response.py
create mode 100644 src/gooey/types/embeddings_page_status_response.py
create mode 100644 src/gooey/types/eval_prompt.py
create mode 100644 src/gooey/types/face_inpainting_page_output.py
create mode 100644 src/gooey/types/face_inpainting_page_request.py
create mode 100644 src/gooey/types/face_inpainting_page_request_selected_model.py
create mode 100644 src/gooey/types/face_inpainting_page_response.py
create mode 100644 src/gooey/types/face_inpainting_page_status_response.py
create mode 100644 src/gooey/types/failed_reponse_model_v2.py
create mode 100644 src/gooey/types/failed_response_detail.py
create mode 100644 src/gooey/types/final_response.py
create mode 100644 src/gooey/types/function.py
create mode 100644 src/gooey/types/functions_page_output.py
create mode 100644 src/gooey/types/functions_page_request.py
create mode 100644 src/gooey/types/functions_page_response.py
create mode 100644 src/gooey/types/functions_page_status_response.py
create mode 100644 src/gooey/types/generic_error_response.py
create mode 100644 src/gooey/types/generic_error_response_detail.py
create mode 100644 src/gooey/types/google_gpt_page_output.py
create mode 100644 src/gooey/types/google_gpt_page_request.py
create mode 100644 src/gooey/types/google_gpt_page_request_embedding_model.py
create mode 100644 src/gooey/types/google_gpt_page_request_selected_model.py
create mode 100644 src/gooey/types/google_gpt_page_response.py
create mode 100644 src/gooey/types/google_gpt_page_status_response.py
create mode 100644 src/gooey/types/google_image_gen_page_output.py
create mode 100644 src/gooey/types/google_image_gen_page_request.py
create mode 100644 src/gooey/types/google_image_gen_page_request_selected_model.py
create mode 100644 src/gooey/types/google_image_gen_page_response.py
create mode 100644 src/gooey/types/google_image_gen_page_status_response.py
create mode 100644 src/gooey/types/http_validation_error.py
create mode 100644 src/gooey/types/image_segmentation_page_output.py
create mode 100644 src/gooey/types/image_segmentation_page_request.py
create mode 100644 src/gooey/types/image_segmentation_page_request_selected_model.py
create mode 100644 src/gooey/types/image_segmentation_page_response.py
create mode 100644 src/gooey/types/image_segmentation_page_status_response.py
create mode 100644 src/gooey/types/image_url.py
create mode 100644 src/gooey/types/img2img_page_output.py
create mode 100644 src/gooey/types/img2img_page_request.py
create mode 100644 src/gooey/types/img2img_page_request_selected_controlnet_model.py
create mode 100644 src/gooey/types/img2img_page_request_selected_controlnet_model_item.py
create mode 100644 src/gooey/types/img2img_page_request_selected_model.py
create mode 100644 src/gooey/types/img2img_page_response.py
create mode 100644 src/gooey/types/img2img_page_status_response.py
create mode 100644 src/gooey/types/letter_writer_page_output.py
create mode 100644 src/gooey/types/letter_writer_page_request.py
create mode 100644 src/gooey/types/letter_writer_page_response.py
create mode 100644 src/gooey/types/letter_writer_page_status_response.py
create mode 100644 src/gooey/types/level.py
create mode 100644 src/gooey/types/lipsync_page_output.py
create mode 100644 src/gooey/types/lipsync_page_request.py
create mode 100644 src/gooey/types/lipsync_page_request_selected_model.py
create mode 100644 src/gooey/types/lipsync_page_response.py
create mode 100644 src/gooey/types/lipsync_page_status_response.py
create mode 100644 src/gooey/types/lipsync_tts_page_output.py
create mode 100644 src/gooey/types/lipsync_tts_page_request.py
create mode 100644 src/gooey/types/lipsync_tts_page_request_openai_tts_model.py
create mode 100644 src/gooey/types/lipsync_tts_page_request_openai_voice_name.py
create mode 100644 src/gooey/types/lipsync_tts_page_request_selected_model.py
create mode 100644 src/gooey/types/lipsync_tts_page_request_tts_provider.py
create mode 100644 src/gooey/types/lipsync_tts_page_response.py
create mode 100644 src/gooey/types/lipsync_tts_page_status_response.py
create mode 100644 src/gooey/types/llm_tools.py
create mode 100644 src/gooey/types/message_part.py
create mode 100644 src/gooey/types/object_inpainting_page_output.py
create mode 100644 src/gooey/types/object_inpainting_page_request.py
create mode 100644 src/gooey/types/object_inpainting_page_request_selected_model.py
create mode 100644 src/gooey/types/object_inpainting_page_response.py
create mode 100644 src/gooey/types/object_inpainting_page_status_response.py
create mode 100644 src/gooey/types/preprocess.py
create mode 100644 src/gooey/types/prompt.py
create mode 100644 src/gooey/types/prompt_tree_node.py
create mode 100644 src/gooey/types/qr_code_generator_page_output.py
create mode 100644 src/gooey/types/qr_code_generator_page_request.py
create mode 100644 src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py
create mode 100644 src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py
create mode 100644 src/gooey/types/qr_code_generator_page_request_selected_model.py
create mode 100644 src/gooey/types/qr_code_generator_page_response.py
create mode 100644 src/gooey/types/qr_code_generator_page_status_response.py
create mode 100644 src/gooey/types/recipe_function.py
create mode 100644 src/gooey/types/recipe_run_state.py
create mode 100644 src/gooey/types/related_doc_search_response.py
create mode 100644 src/gooey/types/related_google_gpt_response.py
create mode 100644 src/gooey/types/related_qn_a_doc_page_output.py
create mode 100644 src/gooey/types/related_qn_a_doc_page_request.py
create mode 100644 src/gooey/types/related_qn_a_doc_page_request_citation_style.py
create mode 100644 src/gooey/types/related_qn_a_doc_page_request_embedding_model.py
create mode 100644 src/gooey/types/related_qn_a_doc_page_request_keyword_query.py
create mode 100644 src/gooey/types/related_qn_a_doc_page_request_selected_model.py
create mode 100644 src/gooey/types/related_qn_a_doc_page_response.py
create mode 100644 src/gooey/types/related_qn_a_doc_page_status_response.py
create mode 100644 src/gooey/types/related_qn_a_page_output.py
create mode 100644 src/gooey/types/related_qn_a_page_request.py
create mode 100644 src/gooey/types/related_qn_a_page_request_embedding_model.py
create mode 100644 src/gooey/types/related_qn_a_page_request_selected_model.py
create mode 100644 src/gooey/types/related_qn_a_page_response.py
create mode 100644 src/gooey/types/related_qn_a_page_status_response.py
create mode 100644 src/gooey/types/reply_button.py
create mode 100644 src/gooey/types/response_model.py
create mode 100644 src/gooey/types/response_model_final_keyword_query.py
create mode 100644 src/gooey/types/response_model_final_prompt.py
create mode 100644 src/gooey/types/role.py
create mode 100644 src/gooey/types/run_settings.py
create mode 100644 src/gooey/types/run_settings_retention_policy.py
create mode 100644 src/gooey/types/run_start.py
create mode 100644 src/gooey/types/sad_talker_settings.py
create mode 100644 src/gooey/types/scheduler.py
create mode 100644 src/gooey/types/search_reference.py
create mode 100644 src/gooey/types/seo_summary_page_output.py
create mode 100644 src/gooey/types/seo_summary_page_request.py
create mode 100644 src/gooey/types/seo_summary_page_request_selected_model.py
create mode 100644 src/gooey/types/seo_summary_page_response.py
create mode 100644 src/gooey/types/seo_summary_page_status_response.py
create mode 100644 src/gooey/types/serp_search_location.py
create mode 100644 src/gooey/types/serp_search_type.py
create mode 100644 src/gooey/types/smart_gpt_page_output.py
create mode 100644 src/gooey/types/smart_gpt_page_request.py
create mode 100644 src/gooey/types/smart_gpt_page_request_selected_model.py
create mode 100644 src/gooey/types/smart_gpt_page_response.py
create mode 100644 src/gooey/types/smart_gpt_page_status_response.py
create mode 100644 src/gooey/types/social_lookup_email_page_output.py
create mode 100644 src/gooey/types/social_lookup_email_page_request.py
create mode 100644 src/gooey/types/social_lookup_email_page_request_selected_model.py
create mode 100644 src/gooey/types/social_lookup_email_page_response.py
create mode 100644 src/gooey/types/social_lookup_email_page_status_response.py
create mode 100644 src/gooey/types/stream_error.py
create mode 100644 src/gooey/types/text2audio_page_output.py
create mode 100644 src/gooey/types/text2audio_page_request.py
create mode 100644 src/gooey/types/text2audio_page_response.py
create mode 100644 src/gooey/types/text2audio_page_status_response.py
create mode 100644 src/gooey/types/text_to_speech_page_output.py
create mode 100644 src/gooey/types/text_to_speech_page_request.py
create mode 100644 src/gooey/types/text_to_speech_page_request_openai_tts_model.py
create mode 100644 src/gooey/types/text_to_speech_page_request_openai_voice_name.py
create mode 100644 src/gooey/types/text_to_speech_page_request_tts_provider.py
create mode 100644 src/gooey/types/text_to_speech_page_response.py
create mode 100644 src/gooey/types/text_to_speech_page_status_response.py
create mode 100644 src/gooey/types/training_data_model.py
create mode 100644 src/gooey/types/translation_page_output.py
create mode 100644 src/gooey/types/translation_page_request.py
create mode 100644 src/gooey/types/translation_page_request_selected_model.py
create mode 100644 src/gooey/types/translation_page_response.py
create mode 100644 src/gooey/types/translation_page_status_response.py
create mode 100644 src/gooey/types/trigger.py
create mode 100644 src/gooey/types/validation_error.py
create mode 100644 src/gooey/types/validation_error_loc_item.py
create mode 100644 src/gooey/types/vcard.py
create mode 100644 src/gooey/types/video_bots_page_output.py
create mode 100644 src/gooey/types/video_bots_page_output_final_keyword_query.py
create mode 100644 src/gooey/types/video_bots_page_output_final_prompt.py
create mode 100644 src/gooey/types/video_bots_page_request.py
create mode 100644 src/gooey/types/video_bots_page_request_asr_model.py
create mode 100644 src/gooey/types/video_bots_page_request_citation_style.py
create mode 100644 src/gooey/types/video_bots_page_request_embedding_model.py
create mode 100644 src/gooey/types/video_bots_page_request_lipsync_model.py
create mode 100644 src/gooey/types/video_bots_page_request_openai_tts_model.py
create mode 100644 src/gooey/types/video_bots_page_request_openai_voice_name.py
create mode 100644 src/gooey/types/video_bots_page_request_selected_model.py
create mode 100644 src/gooey/types/video_bots_page_request_translation_model.py
create mode 100644 src/gooey/types/video_bots_page_request_tts_provider.py
create mode 100644 src/gooey/types/video_bots_page_response.py
create mode 100644 src/gooey/types/video_bots_page_status_response.py
create mode 100644 src/gooey/version.py
create mode 100644 src/gooey/web_search_gpt3/__init__.py
create mode 100644 src/gooey/web_search_gpt3/client.py
create mode 100644 tests/custom/test_client.py
create mode 100644 tests/utils/test_http_client.py
create mode 100644 tests/utils/test_query_encoding.py
diff --git a/.fernignore b/.fernignore
new file mode 100644
index 0000000..084a8eb
--- /dev/null
+++ b/.fernignore
@@ -0,0 +1 @@
+# Specify files that shouldn't be modified by Fern
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..bb1b2d3
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,61 @@
+name: ci
+
+on: [push]
+jobs:
+ compile:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v3
+ - name: Set up python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.8
+ - name: Bootstrap poetry
+ run: |
+ curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
+ - name: Install dependencies
+ run: poetry install
+ - name: Compile
+ run: poetry run mypy .
+ test:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v3
+ - name: Set up python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.8
+ - name: Bootstrap poetry
+ run: |
+ curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
+ - name: Install dependencies
+ run: poetry install
+
+ - name: Test
+ run: poetry run pytest ./tests/custom/
+
+ publish:
+ needs: [compile, test]
+ if: github.event_name == 'push' && contains(github.ref, 'refs/tags/')
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v3
+ - name: Set up python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.8
+ - name: Bootstrap poetry
+ run: |
+ curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
+ - name: Install dependencies
+ run: poetry install
+ - name: Publish to pypi
+ run: |
+ poetry config repositories.remote https://upload.pypi.org/legacy/
+ poetry --no-interaction -v publish --build --repository remote --username "$PYPI_USERNAME" --password "$PYPI_PASSWORD"
+ env:
+ PYPI_USERNAME: ${{ secrets.PYPI_USERNAME }}
+ PYPI_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..42cb863
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+dist/
+.mypy_cache/
+__pycache__/
+poetry.toml
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a0b21e8
--- /dev/null
+++ b/README.md
@@ -0,0 +1,131 @@
+# Gooey Python Library
+
+[![fern shield](https://img.shields.io/badge/%F0%9F%8C%BF-SDK%20generated%20by%20Fern-brightgreen)](https://github.com/fern-api/fern)
+[![pypi](https://img.shields.io/pypi/v/gooey)](https://pypi.python.org/pypi/gooey)
+
+The Gooey Python library provides convenient access to the Gooey API from Python.
+
+## Installation
+
+```sh
+pip install gooey
+```
+
+## Usage
+
+Instantiate and use the client with the following:
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.copilot_integrations.video_bots_stream_create(
+ integration_id="integration_id",
+)
+```
+
+## Async Client
+
+The SDK also exports an `async` client so that you can make non-blocking calls to our API.
+
+```python
+import asyncio
+
+from gooey.client import AsyncGooey
+
+client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+
+
+async def main() -> None:
+ await client.copilot_integrations.video_bots_stream_create(
+ integration_id="integration_id",
+ )
+
+
+asyncio.run(main())
+```
+
+## Exception Handling
+
+When the API returns a non-success status code (4xx or 5xx response), a subclass of the following error
+will be thrown.
+
+```python
+from gooey.core.api_error import ApiError
+
+try:
+ client.copilot_integrations.video_bots_stream_create(...)
+except ApiError as e:
+ print(e.status_code)
+ print(e.body)
+```
+
+## Advanced
+
+### Retries
+
+The SDK is instrumented with automatic retries with exponential backoff. A request will be retried as long
+as the request is deemed retriable and the number of retry attempts has not grown larger than the configured
+retry limit (default: 2).
+
+A request is deemed retriable when any of the following HTTP status codes is returned:
+
+- [408](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/408) (Timeout)
+- [429](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/429) (Too Many Requests)
+- [5XX](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500) (Internal Server Errors)
+
+Use the `max_retries` request option to configure this behavior.
+
+```python
+client.copilot_integrations.video_bots_stream_create(
+    ..., request_options=RequestOptions(max_retries=1)
+)
+```
+
+### Timeouts
+
+The SDK defaults to a 60 second timeout. You can configure this with a timeout option at the client or request level.
+
+```python
+
+from gooey.client import Gooey
+
+client = Gooey(..., timeout=20.0)
+
+
+# Override timeout for a specific method
+client.copilot_integrations.video_bots_stream_create(
+    ..., request_options=RequestOptions(timeout_in_seconds=1)
+)
+```
+
+### Custom Client
+
+You can override the `httpx` client to customize it for your use-case. Some common use-cases include support for proxies
+and transports.
+```python
+import httpx
+from gooey.client import Gooey
+
+client = Gooey(
+ ...,
+ http_client=httpx.Client(
+ proxies="http://my.test.proxy.example.com",
+ transport=httpx.HTTPTransport(local_address="0.0.0.0"),
+ ),
+)
+```
+
+## Contributing
+
+While we value open-source contributions to this SDK, this library is generated programmatically.
+Additions made directly to this library would have to be moved over to our generation code,
+otherwise they would be overwritten upon the next generated release. Feel free to open a PR as
+a proof of concept, but know that we will not be able to merge it as-is. We suggest opening
+an issue first to discuss with us!
+
+On the other hand, contributions to the README are always very welcome!
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 0000000..a3b66c9
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,471 @@
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+description = "Reusable constraint types to use with typing.Annotated"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
+ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
+
+[[package]]
+name = "anyio"
+version = "4.4.0"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
+ {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
+]
+
+[package.dependencies]
+exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
+idna = ">=2.8"
+sniffio = ">=1.1"
+typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
+
+[package.extras]
+doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (>=0.23)"]
+
+[[package]]
+name = "certifi"
+version = "2024.7.4"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
+ {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.2.2"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
+ {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
+[[package]]
+name = "h11"
+version = "0.14.0"
+description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
+ {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+]
+
+[[package]]
+name = "httpcore"
+version = "1.0.5"
+description = "A minimal low-level HTTP client."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
+ {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
+]
+
+[package.dependencies]
+certifi = "*"
+h11 = ">=0.13,<0.15"
+
+[package.extras]
+asyncio = ["anyio (>=4.0,<5.0)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+trio = ["trio (>=0.22.0,<0.26.0)"]
+
+[[package]]
+name = "httpx"
+version = "0.27.0"
+description = "The next generation HTTP client."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
+ {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
+]
+
+[package.dependencies]
+anyio = "*"
+certifi = "*"
+httpcore = "==1.*"
+idna = "*"
+sniffio = "*"
+
+[package.extras]
+brotli = ["brotli", "brotlicffi"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+
+[[package]]
+name = "idna"
+version = "3.7"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
+ {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
+]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "mypy"
+version = "1.0.1"
+description = "Optional static typing for Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
+ {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
+ {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"},
+ {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"},
+ {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"},
+ {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"},
+ {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"},
+ {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"},
+ {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"},
+ {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"},
+ {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"},
+ {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"},
+ {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"},
+ {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"},
+ {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"},
+ {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"},
+ {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"},
+ {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"},
+ {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"},
+ {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"},
+ {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"},
+ {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"},
+ {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"},
+ {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"},
+ {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"},
+ {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"},
+]
+
+[package.dependencies]
+mypy-extensions = ">=0.4.3"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+typing-extensions = ">=3.10"
+
+[package.extras]
+dmypy = ["psutil (>=4.0)"]
+install-types = ["pip"]
+python2 = ["typed-ast (>=1.4.0,<2)"]
+reports = ["lxml"]
+
+[[package]]
+name = "mypy-extensions"
+version = "1.0.0"
+description = "Type system extensions for programs checked with the mypy type checker."
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
+ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
+]
+
+[[package]]
+name = "packaging"
+version = "24.1"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
+ {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
+]
+
+[[package]]
+name = "pluggy"
+version = "1.5.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
+ {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "pydantic"
+version = "2.8.2"
+description = "Data validation using Python type hints"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"},
+ {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"},
+]
+
+[package.dependencies]
+annotated-types = ">=0.4.0"
+pydantic-core = "2.20.1"
+typing-extensions = [
+ {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
+ {version = ">=4.6.1", markers = "python_version < \"3.13\""},
+]
+
+[package.extras]
+email = ["email-validator (>=2.0.0)"]
+
+[[package]]
+name = "pydantic-core"
+version = "2.20.1"
+description = "Core functionality for Pydantic validation and serialization"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"},
+ {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"},
+ {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"},
+ {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"},
+ {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"},
+ {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"},
+ {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"},
+ {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"},
+ {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"},
+ {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"},
+ {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"},
+ {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"},
+ {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"},
+ {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
+
+[[package]]
+name = "pytest"
+version = "7.4.4"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
+ {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "pytest-asyncio"
+version = "0.23.8"
+description = "Pytest support for asyncio"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"},
+ {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"},
+]
+
+[package.dependencies]
+pytest = ">=7.0.0,<9"
+
+[package.extras]
+docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
+testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
+
+[[package]]
+name = "python-dateutil"
+version = "2.9.0.post0"
+description = "Extensions to the standard Python datetime module"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
+ {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
+]
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "sniffio"
+version = "1.3.1"
+description = "Sniff out which async library your code is running under"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
+ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
+]
+
+[[package]]
+name = "tomli"
+version = "2.0.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+]
+
+[[package]]
+name = "types-python-dateutil"
+version = "2.9.0.20240316"
+description = "Typing stubs for python-dateutil"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"},
+ {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"},
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.12.2"
+description = "Backported and Experimental Type Hints for Python 3.8+"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
+ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
+]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.8"
+content-hash = "7fa2085bd251148908cf9a89f13b158fe85ccb037bb44614ae5f150ceecee53c"
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..026c991
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,56 @@
+[tool.poetry]
+name = "gooey"
+version = "0.0.0"
+description = ""
+readme = "README.md"
+authors = []
+keywords = []
+
+classifiers = [
+ "Intended Audience :: Developers",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Operating System :: OS Independent",
+ "Operating System :: POSIX",
+ "Operating System :: MacOS",
+ "Operating System :: POSIX :: Linux",
+ "Operating System :: Microsoft :: Windows",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Typing :: Typed"
+]
+packages = [
+ { include = "gooey", from = "src"}
+]
+
+[project.urls]
+Repository = 'https://github.com/GooeyAI/python-sdk'
+
+[tool.poetry.dependencies]
+python = "^3.8"
+httpx = ">=0.21.2"
+pydantic = ">= 1.9.2"
+typing_extensions = ">= 4.0.0"
+
+[tool.poetry.dev-dependencies]
+mypy = "1.0.1"
+pytest = "^7.4.0"
+pytest-asyncio = "^0.23.5"
+python-dateutil = "^2.9.0"
+types-python-dateutil = "^2.9.0.20240316"
+
+[tool.pytest.ini_options]
+testpaths = [ "tests" ]
+asyncio_mode = "auto"
+
+[tool.mypy]
+plugins = ["pydantic.mypy"]
+
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
diff --git a/reference.md b/reference.md
new file mode 100644
index 0000000..c3f1806
--- /dev/null
+++ b/reference.md
@@ -0,0 +1,14921 @@
+# Reference
+## CopilotIntegrations
+client.copilot_integrations.video_bots_stream_create(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.copilot_integrations.video_bots_stream_create(
+ integration_id="integration_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab
+
+
+
+
+
+-
+
+**conversation_id:** `typing.Optional[str]`
+
+The gooey conversation ID.
+
+If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests.
+
+Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response.
+
+
+
+
+
+-
+
+**user_id:** `typing.Optional[str]`
+
+Your app's custom user ID.
+
+If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation.
+
+
+
+
+
+-
+
+**user_message_id:** `typing.Optional[str]`
+
+Your app's custom message ID for the user message.
+
+If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation.
+
+
+
+
+
+-
+
+**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user.
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_audio:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_images:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**input_documents:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
+
+
+
+
+
+-
+
+**bot_script:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]`
+
+
+
+
+
+-
+
+**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**keyword_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+
+-
+
+**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]`
+
+
+
+
+
+-
+
+**use_url_shortener:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+
+
+
+
+
+-
+
+**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
+
+
+
+
+
+-
+
+**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]`
+
+
+
+
+
+-
+
+**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+
+
+
+
+-
+
+**input_glossary_document:** `typing.Optional[str]`
+
+
+Translation Glossary for User Language -> LLM Language (English)
+
+
+
+
+
+
+-
+
+**output_glossary_document:** `typing.Optional[str]`
+
+
+Translation Glossary for LLM Language (English) -> User Language
+
+
+
+
+
+
+-
+
+**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]`
+
+
+
+
+
+-
+
+**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_pitch:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+
+
+
+
+
+-
+
+**elevenlabs_api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_stability:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_style:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]`
+
+
+
+
+
+-
+
+**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]`
+
+
+
+
+
+-
+
+**input_face:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_padding_top:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_bottom:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_left:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_right:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+
+
+
+
+
+-
+
+**input_text:** `typing.Optional[str]` — Use `input_prompt` instead
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.copilot_integrations.video_bots_stream(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.copilot_integrations.video_bots_stream(
+ request_id="request_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## CopilotForYourEnterprise
+client.copilot_for_your_enterprise.video_bots(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.copilot_for_your_enterprise.video_bots()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_audio:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_images:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**input_documents:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
+
+
+
+
+
+-
+
+**bot_script:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[VideoBotsPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**keyword_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[VideoBotsPageRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+-
+
+**citation_style:** `typing.Optional[VideoBotsPageRequestCitationStyle]`
+
+
+
+
+
+-
+
+**use_url_shortener:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**asr_model:** `typing.Optional[VideoBotsPageRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+
+
+
+
+
+-
+
+**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
+
+
+
+
+
+-
+
+**translation_model:** `typing.Optional[VideoBotsPageRequestTranslationModel]`
+
+
+
+
+
+-
+
+**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+
+
+
+
+-
+
+**input_glossary_document:** `typing.Optional[str]` — Translation Glossary for User Language -> LLM Language (English)
+
+
+
+
+
+-
+
+**output_glossary_document:** `typing.Optional[str]` — Translation Glossary for LLM Language (English) -> User Language
+
+
+
+
+
+-
+
+**lipsync_model:** `typing.Optional[VideoBotsPageRequestLipsyncModel]`
+
+
+
+
+
+-
+
+**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[VideoBotsPageRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_pitch:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+
+
+
+
+
+-
+
+**elevenlabs_api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_stability:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_style:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**openai_voice_name:** `typing.Optional[VideoBotsPageRequestOpenaiVoiceName]`
+
+
+
+
+
+-
+
+**openai_tts_model:** `typing.Optional[VideoBotsPageRequestOpenaiTtsModel]`
+
+
+
+
+
+-
+
+**input_face:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_padding_top:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_bottom:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_left:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_right:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.copilot_for_your_enterprise.async_video_bots(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.copilot_for_your_enterprise.async_video_bots()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_audio:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_images:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**input_documents:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
+
+
+
+
+
+-
+
+**bot_script:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[VideoBotsPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**keyword_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[VideoBotsPageRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+-
+
+**citation_style:** `typing.Optional[VideoBotsPageRequestCitationStyle]`
+
+
+
+
+
+-
+
+**use_url_shortener:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**asr_model:** `typing.Optional[VideoBotsPageRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+
+
+
+
+
+-
+
+**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
+
+
+
+
+
+-
+
+**translation_model:** `typing.Optional[VideoBotsPageRequestTranslationModel]`
+
+
+
+
+
+-
+
+**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+
+
+
+
+-
+
+**input_glossary_document:** `typing.Optional[str]` — Translation Glossary for User Language -> LLM Language (English)
+
+
+
+
+
+-
+
+**output_glossary_document:** `typing.Optional[str]` — Translation Glossary for LLM Language (English) -> User Language
+
+
+
+
+
+-
+
+**lipsync_model:** `typing.Optional[VideoBotsPageRequestLipsyncModel]`
+
+
+
+
+
+-
+
+**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[VideoBotsPageRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_pitch:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+
+
+
+
+
+-
+
+**elevenlabs_api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_stability:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_style:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**openai_voice_name:** `typing.Optional[VideoBotsPageRequestOpenaiVoiceName]`
+
+
+
+
+
+-
+
+**openai_tts_model:** `typing.Optional[VideoBotsPageRequestOpenaiTtsModel]`
+
+
+
+
+
+-
+
+**input_face:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_padding_top:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_bottom:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_left:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_right:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.copilot_for_your_enterprise.status_video_bots(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.copilot_for_your_enterprise.status_video_bots(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## AiAnimationGenerator
+client.ai_animation_generator.deforum_sd(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import AnimationPrompt
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_animation_generator.deforum_sd(
+ animation_prompts=[
+ AnimationPrompt(
+ frame="frame",
+ prompt="prompt",
+ )
+ ],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**animation_prompts:** `typing.Sequence[AnimationPrompt]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**max_frames:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**animation_mode:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**zoom:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_x:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_y:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**rotation3d_x:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**rotation3d_y:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**rotation3d_z:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**fps:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.ai_animation_generator.async_deforum_sd(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey import AnimationPrompt
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_animation_generator.async_deforum_sd(
+ animation_prompts=[
+ AnimationPrompt(
+ frame="frame",
+ prompt="prompt",
+ )
+ ],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**animation_prompts:** `typing.Sequence[AnimationPrompt]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**max_frames:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**animation_mode:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**zoom:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_x:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_y:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**rotation3d_x:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**rotation3d_y:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**rotation3d_z:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**fps:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.ai_animation_generator.status_deforum_sd(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_animation_generator.status_deforum_sd(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## AiArtQrCode
+client.ai_art_qr_code.art_qr_code(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_art_qr_code.art_qr_code(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**qr_code_data:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**qr_code_input_image:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**qr_code_vcard:** `typing.Optional[Vcard]`
+
+
+
+
+
+-
+
+**qr_code_file:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**use_url_shortener:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**image_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**image_prompt_controlnet_models:** `typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+]`
+
+
+
+
+
+-
+
+**image_prompt_strength:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**image_prompt_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**image_prompt_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**image_prompt_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[QrCodeGeneratorPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**selected_controlnet_model:** `typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scheduler:** `typing.Optional[Scheduler]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**obj_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.ai_art_qr_code.async_art_qr_code(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_art_qr_code.async_art_qr_code(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**qr_code_data:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**qr_code_input_image:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**qr_code_vcard:** `typing.Optional[Vcard]`
+
+
+
+
+
+-
+
+**qr_code_file:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**use_url_shortener:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**image_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**image_prompt_controlnet_models:** `typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+]`
+
+
+
+
+
+-
+
+**image_prompt_strength:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**image_prompt_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**image_prompt_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**image_prompt_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[QrCodeGeneratorPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**selected_controlnet_model:** `typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scheduler:** `typing.Optional[Scheduler]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**obj_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.ai_art_qr_code.status_art_qr_code(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_art_qr_code.status_art_qr_code(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## GeneratePeopleAlsoAskSeoContent
+client.generate_people_also_ask_seo_content.related_qna_maker(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.generate_people_also_ask_seo_content.related_qna_maker(
+ search_query="search_query",
+ site_filter="site_filter",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**site_filter:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
+
+
+
+
+-
+
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[RelatedQnAPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_search_urls:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.generate_people_also_ask_seo_content.async_related_qna_maker(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.generate_people_also_ask_seo_content.async_related_qna_maker(
+ search_query="search_query",
+ site_filter="site_filter",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**site_filter:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
+
+
+
+
+-
+
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[RelatedQnAPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_search_urls:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.generate_people_also_ask_seo_content.status_related_qna_maker(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.generate_people_also_ask_seo_content.status_related_qna_maker(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## CreateAPerfectSeoOptimizedTitleParagraph
+client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
+ search_query="search_query",
+ keywords="keywords",
+ title="title",
+ company_url="company_url",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**keywords:** `str`
+
+
+
+
+
+-
+
+**title:** `str`
+
+
+
+
+
+-
+
+**company_url:** `str`
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
+
+
+
+
+-
+
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**enable_html:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[SeoSummaryPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**max_search_urls:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**enable_crosslinks:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
+ search_query="search_query",
+ keywords="keywords",
+ title="title",
+ company_url="company_url",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**keywords:** `str`
+
+
+
+
+
+-
+
+**title:** `str`
+
+
+
+
+
+-
+
+**company_url:** `str`
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
+
+
+
+
+-
+
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**enable_html:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[SeoSummaryPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**max_search_urls:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**enable_crosslinks:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## WebSearchGpt3
+client.web_search_gpt3.google_gpt(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.web_search_gpt3.google_gpt(
+ search_query="search_query",
+ site_filter="site_filter",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**site_filter:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
+
+
+
+
+-
+
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[GoogleGptPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_search_urls:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.web_search_gpt3.async_google_gpt(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.web_search_gpt3.async_google_gpt(
+ search_query="search_query",
+ site_filter="site_filter",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**site_filter:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
+
+
+
+
+-
+
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[GoogleGptPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_search_urls:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.web_search_gpt3.status_google_gpt(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.web_search_gpt3.status_google_gpt(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## ProfileLookupGpt3ForAiPersonalizedEmails
+client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
+ email_address="email_address",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**email_address:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[SocialLookupEmailPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
+ email_address="email_address",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**email_address:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[SocialLookupEmailPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## BulkRunner
+client.bulk_runner.post(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.bulk_runner.post(
+ documents=["documents"],
+ run_urls=["run_urls"],
+ input_columns={"input_columns": "input_columns"},
+ output_columns={"output_columns": "output_columns"},
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.Sequence[str]`
+
+Upload or link to a CSV or google sheet that contains your sample input data.
+For example, for Copilot, this would be sample questions, or for Art QR Code, it would be pairs of image descriptions and URLs.
+Remember to include header names in your CSV too.
+
+
+
+
+
+-
+
+**run_urls:** `typing.Sequence[str]`
+
+Provide one or more Gooey.AI workflow runs.
+You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+
+
+
+
+
+-
+
+**input_columns:** `typing.Dict[str, str]` — For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+
+
+
+
+
+-
+
+**output_columns:** `typing.Dict[str, str]` — For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**eval_urls:** `typing.Optional[typing.Sequence[str]]` — _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.bulk_runner.async_bulk_runner(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.bulk_runner.async_bulk_runner(
+ documents=["documents"],
+ run_urls=["run_urls"],
+ input_columns={"input_columns": "input_columns"},
+ output_columns={"output_columns": "output_columns"},
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.Sequence[str]`
+
+Upload or link to a CSV or google sheet that contains your sample input data.
+For example, for Copilot, this would be sample questions, or for Art QR Code, it would be pairs of image descriptions and URLs.
+Remember to include header names in your CSV too.
+
+
+
+
+
+-
+
+**run_urls:** `typing.Sequence[str]`
+
+Provide one or more Gooey.AI workflow runs.
+You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+
+
+
+
+
+-
+
+**input_columns:** `typing.Dict[str, str]` — For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+
+
+
+
+
+-
+
+**output_columns:** `typing.Dict[str, str]` — For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**eval_urls:** `typing.Optional[typing.Sequence[str]]` — _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.bulk_runner.status_bulk_runner(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.bulk_runner.status_bulk_runner(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Evaluator
+client.evaluator.bulk_eval(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.evaluator.bulk_eval(
+ documents=["documents"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.Sequence[str]`
+
+Upload or link to a CSV or google sheet that contains your sample input data.
+For example, for Copilot, this would be sample questions, or for Art QR Code, it would be pairs of image descriptions and URLs.
+Remember to include header names in your CSV too.
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**eval_prompts:** `typing.Optional[typing.Sequence[EvalPrompt]]`
+
+Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
+_The `columns` dictionary can be used to reference the spreadsheet columns._
+
+
+
+
+
+
+-
+
+**agg_functions:** `typing.Optional[typing.Sequence[AggFunction]]` — Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluator.async_bulk_eval(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.evaluator.async_bulk_eval(
+ documents=["documents"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.Sequence[str]`
+
+Upload or link to a CSV or google sheet that contains your sample input data.
+For example, for Copilot, this would be sample questions, or for Art QR Code, it would be pairs of image descriptions and URLs.
+Remember to include header names in your CSV too.
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**eval_prompts:** `typing.Optional[typing.Sequence[EvalPrompt]]`
+
+Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
+_The `columns` dictionary can be used to reference the spreadsheet columns._
+
+
+
+
+
+
+-
+
+**agg_functions:** `typing.Optional[typing.Sequence[AggFunction]]` — Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluator.status_bulk_eval(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.evaluator.status_bulk_eval(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## SyntheticDataMakerForVideosPdFs
+client.synthetic_data_maker_for_videos_pd_fs.doc_extract(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.synthetic_data_maker_for_videos_pd_fs.doc_extract(
+ documents=["documents"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.Sequence[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**sheet_url:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_asr_model:** `typing.Optional[DocExtractPageRequestSelectedAsrModel]`
+
+
+
+
+
+-
+
+**google_translate_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**glossary_document:** `typing.Optional[str]`
+
+Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[DocExtractPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(
+ documents=["documents"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.Sequence[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**sheet_url:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_asr_model:** `typing.Optional[DocExtractPageRequestSelectedAsrModel]`
+
+
+
+
+
+-
+
+**google_translate_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**glossary_document:** `typing.Optional[str]`
+
+Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[DocExtractPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## LargeLanguageModelsGpt3
+client.large_language_models_gpt3.compare_llm(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.large_language_models_gpt3.compare_llm()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.large_language_models_gpt3.async_compare_llm(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.large_language_models_gpt3.async_compare_llm()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.large_language_models_gpt3.status_compare_llm(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.large_language_models_gpt3.status_compare_llm(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## SearchYourDocsWithGpt
+client.search_your_docs_with_gpt.doc_search(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.search_your_docs_with_gpt.doc_search(
+ search_query="search_query",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**keyword_query:** `typing.Optional[DocSearchPageRequestKeywordQuery]`
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**doc_extract_url:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[DocSearchPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.search_your_docs_with_gpt.async_doc_search(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.search_your_docs_with_gpt.async_doc_search(
+ search_query="search_query",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**keyword_query:** `typing.Optional[DocSearchPageRequestKeywordQuery]`
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**doc_extract_url:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+Weighting for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[DocSearchPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.search_your_docs_with_gpt.status_doc_search(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.search_your_docs_with_gpt.status_doc_search(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## SmartGpt
+client.smart_gpt.post(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.smart_gpt.post(
+ input_prompt="input_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**cot_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**reflexion_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**dera_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.smart_gpt.async_smart_gpt(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.smart_gpt.async_smart_gpt(
+ input_prompt="input_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**cot_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**reflexion_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**dera_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.smart_gpt.status_smart_gpt(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.smart_gpt.status_smart_gpt(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## SummarizeYourDocsWithGpt
+client.summarize_your_docs_with_gpt.doc_summary(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.summarize_your_docs_with_gpt.doc_summary(
+ documents=["documents"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.Sequence[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**merge_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[DocSummaryPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]`
+
+
+
+
+
+-
+
+**selected_asr_model:** `typing.Optional[DocSummaryPageRequestSelectedAsrModel]`
+
+
+
+
+
+-
+
+**google_translate_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.summarize_your_docs_with_gpt.async_doc_summary(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.summarize_your_docs_with_gpt.async_doc_summary(
+ documents=["documents"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.Sequence[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**merge_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[DocSummaryPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]`
+
+
+
+
+
+-
+
+**selected_asr_model:** `typing.Optional[DocSummaryPageRequestSelectedAsrModel]`
+
+
+
+
+
+-
+
+**google_translate_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.summarize_your_docs_with_gpt.status_doc_summary(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.summarize_your_docs_with_gpt.status_doc_summary(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Functions
+client.functions.post(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.functions.post()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**code:** `typing.Optional[str]` — The JS code to be executed.
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used in the code
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.functions.async_functions(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.functions.async_functions()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**code:** `typing.Optional[str]` — The JS code to be executed.
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used in the code
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.functions.status_functions(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.functions.status_functions(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## LipSyncing
+client.lip_syncing.lipsync(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.lip_syncing.lipsync()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_face:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_padding_top:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_bottom:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_left:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_right:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[LipsyncPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**input_audio:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.lip_syncing.async_lipsync(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.lip_syncing.async_lipsync()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_face:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_padding_top:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_bottom:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_left:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_right:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[LipsyncPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**input_audio:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.lip_syncing.status_lipsync(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.lip_syncing.status_lipsync(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## LipsyncVideoWithAnyText
+client.lipsync_video_with_any_text.lipsync_tts(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.lipsync_video_with_any_text.lipsync_tts(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[LipsyncTtsPageRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_pitch:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+
+
+
+
+
+-
+
+**elevenlabs_api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_stability:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_style:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**openai_voice_name:** `typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]`
+
+
+
+
+
+-
+
+**openai_tts_model:** `typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]`
+
+
+
+
+
+-
+
+**input_face:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_padding_top:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_bottom:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_left:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_right:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[LipsyncTtsPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.lipsync_video_with_any_text.async_lipsync_tts(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.lipsync_video_with_any_text.async_lipsync_tts(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[LipsyncTtsPageRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_pitch:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+
+
+
+
+
+-
+
+**elevenlabs_api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_stability:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_style:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**openai_voice_name:** `typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]`
+
+
+
+
+
+-
+
+**openai_tts_model:** `typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]`
+
+
+
+
+
+-
+
+**input_face:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_padding_top:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_bottom:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_left:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**face_padding_right:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[LipsyncTtsPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.lipsync_video_with_any_text.status_lipsync_tts(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.lipsync_video_with_any_text.status_lipsync_tts(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## CompareAiVoiceGenerators
+client.compare_ai_voice_generators.text_to_speech(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_voice_generators.text_to_speech(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_pitch:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+
+
+
+
+
+-
+
+**elevenlabs_api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_stability:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_style:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]`
+
+
+
+
+
+-
+
+**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.compare_ai_voice_generators.async_text_to_speech(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_voice_generators.async_text_to_speech(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]`
+
+
+
+
+
+-
+
+**uberduck_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**uberduck_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**google_speaking_rate:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**google_pitch:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**bark_history_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
+
+
+
+
+
+-
+
+**elevenlabs_api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_voice_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_model:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**elevenlabs_stability:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_style:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**elevenlabs_speaker_boost:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**azure_voice_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]`
+
+
+
+
+
+-
+
+**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.compare_ai_voice_generators.status_text_to_speech(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_voice_generators.status_text_to_speech(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## SpeechRecognitionTranslation
+client.speech_recognition_translation.asr(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.speech_recognition_translation.asr(
+ documents=["documents"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.Sequence[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[AsrPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**language:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_model:** `typing.Optional[AsrPageRequestTranslationModel]`
+
+
+
+
+
+-
+
+**output_format:** `typing.Optional[AsrPageRequestOutputFormat]`
+
+
+
+
+
+-
+
+**google_translate_target:** `typing.Optional[str]` — Use `translation_model` and `translation_target` instead.
+
+
+
+
+
+-
+
+**translation_source:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**glossary_document:** `typing.Optional[str]`
+
+Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.speech_recognition_translation.async_asr(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.speech_recognition_translation.async_asr(
+ documents=["documents"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**documents:** `typing.Sequence[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[AsrPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**language:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_model:** `typing.Optional[AsrPageRequestTranslationModel]`
+
+
+
+
+
+-
+
+**output_format:** `typing.Optional[AsrPageRequestOutputFormat]`
+
+
+
+
+
+-
+
+**google_translate_target:** `typing.Optional[str]` — Use `translation_model` and `translation_target` instead.
+
+
+
+
+
+-
+
+**translation_source:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**glossary_document:** `typing.Optional[str]`
+
+Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.speech_recognition_translation.status_asr(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.speech_recognition_translation.status_asr(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## TextGuidedAudioGenerator
+client.text_guided_audio_generator.text2audio(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.text_guided_audio_generator.text2audio(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**duration_sec:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.text_guided_audio_generator.async_text2audio(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.text_guided_audio_generator.async_text2audio(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**duration_sec:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.text_guided_audio_generator.status_text2audio(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.text_guided_audio_generator.status_text2audio(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## CompareAiTranslations
+client.compare_ai_translations.translate(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_translations.translate()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**texts:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[TranslationPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**translation_source:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**glossary_document:** `typing.Optional[str]`
+
Provide a glossary to customize translation and improve the accuracy of domain-specific terms.
If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.compare_ai_translations.async_translate(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_translations.async_translate()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**texts:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[TranslationPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**translation_source:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**translation_target:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**glossary_document:** `typing.Optional[str]`
+
Provide a glossary to customize translation and improve the accuracy of domain-specific terms.
If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.compare_ai_translations.status_translate(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_translations.status_translate(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## EditAnImageWithAiPrompt
+client.edit_an_image_with_ai_prompt.img2img(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.edit_an_image_with_ai_prompt.img2img(
+ input_image="input_image",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**text_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[Img2ImgPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**selected_controlnet_model:** `typing.Optional[Img2ImgPageRequestSelectedControlnetModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**prompt_strength:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**image_guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.edit_an_image_with_ai_prompt.async_img2img(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.edit_an_image_with_ai_prompt.async_img2img(
+ input_image="input_image",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**text_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[Img2ImgPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**selected_controlnet_model:** `typing.Optional[Img2ImgPageRequestSelectedControlnetModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**prompt_strength:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**image_guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.edit_an_image_with_ai_prompt.status_img2img(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.edit_an_image_with_ai_prompt.status_img2img(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## CompareAiImageGenerators
+client.compare_ai_image_generators.compare_text2img(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_image_generators.compare_text2img(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**dall_e3quality:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**dall_e3style:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]`
+
+
+
+
+
+-
+
+**scheduler:** `typing.Optional[Scheduler]`
+
+
+
+
+
+-
+
+**edit_instruction:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**image_guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.compare_ai_image_generators.async_compare_text2img(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_image_generators.async_compare_text2img(
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**dall_e3quality:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**dall_e3style:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]`
+
+
+
+
+
+-
+
+**scheduler:** `typing.Optional[Scheduler]`
+
+
+
+
+
+-
+
+**edit_instruction:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**image_guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.compare_ai_image_generators.status_compare_text2img(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_image_generators.status_compare_text2img(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## GenerateProductPhotoBackgrounds
+client.generate_product_photo_backgrounds.object_inpainting(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.generate_product_photo_backgrounds.object_inpainting(
+ input_image="input_image",
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**obj_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**mask_threshold:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[ObjectInpaintingPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.generate_product_photo_backgrounds.async_object_inpainting(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.generate_product_photo_backgrounds.async_object_inpainting(
+ input_image="input_image",
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**obj_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**mask_threshold:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[ObjectInpaintingPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.generate_product_photo_backgrounds.status_object_inpainting(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.generate_product_photo_backgrounds.status_object_inpainting(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## AiImageWithAFace
+client.ai_image_with_a_face.face_inpainting(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_image_with_a_face.face_inpainting(
+ input_image="input_image",
+ text_prompt="tony stark from the iron man",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**face_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[FaceInpaintingPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**upscale_factor:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.ai_image_with_a_face.async_face_inpainting(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_image_with_a_face.async_face_inpainting(
+ input_image="input_image",
+ text_prompt="tony stark from the iron man",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**face_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[FaceInpaintingPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**upscale_factor:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.ai_image_with_a_face.status_face_inpainting(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_image_with_a_face.status_face_inpainting(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## AiGeneratedPhotoFromEmailProfileLookup
+client.ai_generated_photo_from_email_profile_lookup.email_face_inpainting(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_generated_photo_from_email_profile_lookup.email_face_inpainting(
+ email_address="sean@dara.network",
+ text_prompt="winter's day in paris",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**email_address:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**twitter_handle:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**upscale_factor:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**should_send_email:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**email_from:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_cc:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_bcc:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_subject:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_body:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_body_enable_html:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**fallback_email_body:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.ai_generated_photo_from_email_profile_lookup.async_email_face_inpainting(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_generated_photo_from_email_profile_lookup.async_email_face_inpainting(
+ email_address="sean@dara.network",
+ text_prompt="winter's day in paris",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**email_address:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**twitter_handle:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**face_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**face_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**upscale_factor:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**output_width:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**output_height:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**should_send_email:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**email_from:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_cc:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_bcc:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_subject:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_body:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**email_body_enable_html:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**fallback_email_body:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## RenderImageSearchResultsWithAi
+client.render_image_search_results_with_ai.google_image_gen(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.render_image_search_results_with_ai.google_image_gen(
+ search_query="search_query",
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**prompt_strength:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**image_guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.render_image_search_results_with_ai.async_google_image_gen(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.render_image_search_results_with_ai.async_google_image_gen(
+ search_query="search_query",
+ text_prompt="text_prompt",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**text_prompt:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**negative_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**prompt_strength:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**sd2upscaling:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**image_guidance_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.render_image_search_results_with_ai.status_google_image_gen(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.render_image_search_results_with_ai.status_google_image_gen(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## AiBackgroundChanger
+client.ai_background_changer.image_segmentation(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_background_changer.image_segmentation(
+ input_image="input_image",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[ImageSegmentationPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**mask_threshold:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**rect_persepective_transform:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**reflection_opacity:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.ai_background_changer.async_image_segmentation(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_background_changer.async_image_segmentation(
+ input_image="input_image",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**input_image:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[ImageSegmentationPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**mask_threshold:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**rect_persepective_transform:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**reflection_opacity:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_scale:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_x:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**obj_pos_y:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.ai_background_changer.status_image_segmentation(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.ai_background_changer.status_image_segmentation(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## CompareAiImageUpscalers
+client.compare_ai_image_upscalers.compare_ai_upscalers(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_image_upscalers.compare_ai_upscalers(
+ scale=1,
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**scale:** `int` — The final upsampling scale of the image
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_image:** `typing.Optional[str]` — Input Image
+
+
+
+
+
+-
+
+**input_video:** `typing.Optional[str]` — Input Video
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]`
+
+
+
+
+
+-
+
+**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.compare_ai_image_upscalers.async_compare_ai_upscalers(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_image_upscalers.async_compare_ai_upscalers(
+ scale=1,
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**scale:** `int` — The final upsampling scale of the image
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**input_image:** `typing.Optional[str]` — Input Image
+
+
+
+
+
+-
+
+**input_video:** `typing.Optional[str]` — Input Video
+
+
+
+
+
+-
+
+**selected_models:** `typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]`
+
+
+
+
+
+-
+
+**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.compare_ai_image_upscalers.status_compare_ai_upscalers(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.compare_ai_image_upscalers.status_compare_ai_upscalers(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## ChyronPlantBot
+client.chyron_plant_bot.chyron_plant(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.chyron_plant_bot.chyron_plant(
+ midi_notes="C#1 B6 A2 A1 A3 A2",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**midi_notes:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**midi_notes_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**chyron_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.chyron_plant_bot.async_chyron_plant(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.chyron_plant_bot.async_chyron_plant(
+ midi_notes="C#1 B6 A2 A1 A3 A2",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**midi_notes:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**midi_notes_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**chyron_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.chyron_plant_bot.status_chyron_plant(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.chyron_plant_bot.status_chyron_plant(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## LetterWriter
+client.letter_writer.letter_writer(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.letter_writer.letter_writer(
+ action_id="action_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**action_id:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**prompt_header:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**example_letters:** `typing.Optional[typing.Sequence[TrainingDataModel]]`
+
+
+
+
+
+-
+
+**lm_selected_api:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**lm_selected_engine:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**lm_sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**api_http_method:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**api_url:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**api_headers:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**api_json_body:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**strip_html2text:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.letter_writer.async_letter_writer(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.letter_writer.async_letter_writer(
+ action_id="action_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**action_id:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**prompt_header:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**example_letters:** `typing.Optional[typing.Sequence[TrainingDataModel]]`
+
+
+
+
+
+-
+
+**lm_selected_api:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**lm_selected_engine:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**lm_sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**api_http_method:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**api_url:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**api_headers:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**api_json_body:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**input_prompt:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**strip_html2text:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.letter_writer.status_letter_writer(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.letter_writer.status_letter_writer(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Embeddings
+client.embeddings.post(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.embeddings.post(
+ texts=["texts"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**texts:** `typing.Sequence[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.embeddings.async_embeddings(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.embeddings.async_embeddings(
+ texts=["texts"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**texts:** `typing.Sequence[str]`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.embeddings.status_embeddings(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.embeddings.status_embeddings(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## PeopleAlsoAskAnswersFromADoc
+client.people_also_ask_answers_from_a_doc.related_qna_maker_doc(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.people_also_ask_answers_from_a_doc.related_qna_maker_doc(
+ search_query="search_query",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]`
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**doc_extract_url:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]`
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
+
+
+
+
+-
+
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc(
+ search_query="search_query",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**search_query:** `str`
+
+
+
+
+
+-
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+
+
+
+
+
+-
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+
+
+
+
+
+-
+
+**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]`
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.Sequence[str]]`
+
+
+
+
+
+-
+
+**max_references:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**max_context_words:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**scroll_jump:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**doc_extract_url:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]`
+
+
+
+
+
+-
+
+**dense_weight:** `typing.Optional[float]`
+
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+
+
+
+-
+
+**task_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**query_instructions:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]`
+
+
+
+
+
+-
+
+**avoid_repetition:** `typing.Optional[bool]`
+
+
+
+
+
+-
+
+**num_outputs:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**quality:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**max_tokens:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**sampling_temperature:** `typing.Optional[float]`
+
+
+
+
+
+-
+
+**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]`
+
+
+
+
+
+-
+
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+
+
+
+
+
+-
+
+**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
+
+
+
+
+
+-
+
+**serp_search_type:** `typing.Optional[SerpSearchType]`
+
+
+
+
+
+-
+
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
+
+
+
+
+
+-
+
+**settings:** `typing.Optional[RunSettings]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc(
+ run_id="run_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**run_id:** `str`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Misc
+client.misc.get_balance()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.misc.get_balance()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.misc.video_bots_broadcast(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.misc.video_bots_broadcast(
+ text="text",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**text:** `str` — Message to broadcast to all users
+
+
+
+
+
+-
+
+**example_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**audio:** `typing.Optional[str]` — Audio URL to send to all users
+
+
+
+
+
+-
+
+**video:** `typing.Optional[str]` — Video URL to send to all users
+
+
+
+
+
+-
+
+**documents:** `typing.Optional[typing.Sequence[str]]` — Document URLs to send to all users
+
+
+
+
+
+-
+
+**buttons:** `typing.Optional[typing.Sequence[ReplyButton]]` — Buttons to send to all users
+
+
+
+
+
+-
+
+**filters:** `typing.Optional[BotBroadcastFilters]` — Filters to select users to broadcast to. If not provided, will broadcast to all users of this bot.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.misc.health()
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from gooey.client import Gooey
+
+client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+)
+client.misc.health()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py
new file mode 100644
index 0000000..1f0c284
--- /dev/null
+++ b/src/gooey/__init__.py
@@ -0,0 +1,614 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+ AggFunction,
+ AggFunctionResult,
+ AnimationPrompt,
+ AsrChunk,
+ AsrOutputJson,
+ AsrPageOutput,
+ AsrPageOutputOutputTextItem,
+ AsrPageRequest,
+ AsrPageRequestOutputFormat,
+ AsrPageRequestSelectedModel,
+ AsrPageRequestTranslationModel,
+ AsrPageResponse,
+ AsrPageStatusResponse,
+ AsyncApiResponseModelV3,
+ BalanceResponse,
+ BotBroadcastFilters,
+ BulkEvalPageOutput,
+ BulkEvalPageRequest,
+ BulkEvalPageRequestSelectedModel,
+ BulkEvalPageResponse,
+ BulkEvalPageStatusResponse,
+ BulkRunnerPageOutput,
+ BulkRunnerPageRequest,
+ BulkRunnerPageResponse,
+ BulkRunnerPageStatusResponse,
+ ButtonPressed,
+ CalledFunctionResponse,
+ ChatCompletionContentPartImageParam,
+ ChatCompletionContentPartTextParam,
+ ChyronPlantPageOutput,
+ ChyronPlantPageRequest,
+ ChyronPlantPageResponse,
+ ChyronPlantPageStatusResponse,
+ CompareLlmPageOutput,
+ CompareLlmPageRequest,
+ CompareLlmPageRequestResponseFormatType,
+ CompareLlmPageRequestSelectedModelsItem,
+ CompareLlmPageResponse,
+ CompareLlmPageStatusResponse,
+ CompareText2ImgPageOutput,
+ CompareText2ImgPageRequest,
+ CompareText2ImgPageRequestSelectedModelsItem,
+ CompareText2ImgPageResponse,
+ CompareText2ImgPageStatusResponse,
+ CompareUpscalerPageOutput,
+ CompareUpscalerPageRequest,
+ CompareUpscalerPageRequestSelectedModelsItem,
+ CompareUpscalerPageResponse,
+ CompareUpscalerPageStatusResponse,
+ ConsoleLogs,
+ Content,
+ ConversationEntry,
+ ConversationEntryContentItem,
+ ConversationEntryContentItem_ImageUrl,
+ ConversationEntryContentItem_Text,
+ ConversationStart,
+ CreateStreamResponse,
+ DeforumSdPageOutput,
+ DeforumSdPageRequest,
+ DeforumSdPageRequestSelectedModel,
+ DeforumSdPageResponse,
+ DeforumSdPageStatusResponse,
+ Detail,
+ DocExtractPageOutput,
+ DocExtractPageRequest,
+ DocExtractPageRequestSelectedAsrModel,
+ DocExtractPageRequestSelectedModel,
+ DocExtractPageResponse,
+ DocExtractPageStatusResponse,
+ DocSearchPageOutput,
+ DocSearchPageRequest,
+ DocSearchPageRequestCitationStyle,
+ DocSearchPageRequestEmbeddingModel,
+ DocSearchPageRequestKeywordQuery,
+ DocSearchPageRequestSelectedModel,
+ DocSearchPageResponse,
+ DocSearchPageStatusResponse,
+ DocSummaryPageOutput,
+ DocSummaryPageRequest,
+ DocSummaryPageRequestSelectedAsrModel,
+ DocSummaryPageRequestSelectedModel,
+ DocSummaryPageResponse,
+ DocSummaryPageStatusResponse,
+ EmailFaceInpaintingPageOutput,
+ EmailFaceInpaintingPageRequest,
+ EmailFaceInpaintingPageRequestSelectedModel,
+ EmailFaceInpaintingPageResponse,
+ EmailFaceInpaintingPageStatusResponse,
+ EmbeddingsPageOutput,
+ EmbeddingsPageRequest,
+ EmbeddingsPageRequestSelectedModel,
+ EmbeddingsPageResponse,
+ EmbeddingsPageStatusResponse,
+ EvalPrompt,
+ FaceInpaintingPageOutput,
+ FaceInpaintingPageRequest,
+ FaceInpaintingPageRequestSelectedModel,
+ FaceInpaintingPageResponse,
+ FaceInpaintingPageStatusResponse,
+ FailedReponseModelV2,
+ FailedResponseDetail,
+ FinalResponse,
+ Function,
+ FunctionsPageOutput,
+ FunctionsPageRequest,
+ FunctionsPageResponse,
+ FunctionsPageStatusResponse,
+ GenericErrorResponse,
+ GenericErrorResponseDetail,
+ GoogleGptPageOutput,
+ GoogleGptPageRequest,
+ GoogleGptPageRequestEmbeddingModel,
+ GoogleGptPageRequestSelectedModel,
+ GoogleGptPageResponse,
+ GoogleGptPageStatusResponse,
+ GoogleImageGenPageOutput,
+ GoogleImageGenPageRequest,
+ GoogleImageGenPageRequestSelectedModel,
+ GoogleImageGenPageResponse,
+ GoogleImageGenPageStatusResponse,
+ HttpValidationError,
+ ImageSegmentationPageOutput,
+ ImageSegmentationPageRequest,
+ ImageSegmentationPageRequestSelectedModel,
+ ImageSegmentationPageResponse,
+ ImageSegmentationPageStatusResponse,
+ ImageUrl,
+ Img2ImgPageOutput,
+ Img2ImgPageRequest,
+ Img2ImgPageRequestSelectedControlnetModel,
+ Img2ImgPageRequestSelectedControlnetModelItem,
+ Img2ImgPageRequestSelectedModel,
+ Img2ImgPageResponse,
+ Img2ImgPageStatusResponse,
+ LetterWriterPageOutput,
+ LetterWriterPageRequest,
+ LetterWriterPageResponse,
+ LetterWriterPageStatusResponse,
+ Level,
+ LipsyncPageOutput,
+ LipsyncPageRequest,
+ LipsyncPageRequestSelectedModel,
+ LipsyncPageResponse,
+ LipsyncPageStatusResponse,
+ LipsyncTtsPageOutput,
+ LipsyncTtsPageRequest,
+ LipsyncTtsPageRequestOpenaiTtsModel,
+ LipsyncTtsPageRequestOpenaiVoiceName,
+ LipsyncTtsPageRequestSelectedModel,
+ LipsyncTtsPageRequestTtsProvider,
+ LipsyncTtsPageResponse,
+ LipsyncTtsPageStatusResponse,
+ LlmTools,
+ MessagePart,
+ ObjectInpaintingPageOutput,
+ ObjectInpaintingPageRequest,
+ ObjectInpaintingPageRequestSelectedModel,
+ ObjectInpaintingPageResponse,
+ ObjectInpaintingPageStatusResponse,
+ Preprocess,
+ Prompt,
+ PromptTreeNode,
+ QrCodeGeneratorPageOutput,
+ QrCodeGeneratorPageRequest,
+ QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
+ QrCodeGeneratorPageRequestSelectedControlnetModelItem,
+ QrCodeGeneratorPageRequestSelectedModel,
+ QrCodeGeneratorPageResponse,
+ QrCodeGeneratorPageStatusResponse,
+ RecipeFunction,
+ RecipeRunState,
+ RelatedDocSearchResponse,
+ RelatedGoogleGptResponse,
+ RelatedQnADocPageOutput,
+ RelatedQnADocPageRequest,
+ RelatedQnADocPageRequestCitationStyle,
+ RelatedQnADocPageRequestEmbeddingModel,
+ RelatedQnADocPageRequestKeywordQuery,
+ RelatedQnADocPageRequestSelectedModel,
+ RelatedQnADocPageResponse,
+ RelatedQnADocPageStatusResponse,
+ RelatedQnAPageOutput,
+ RelatedQnAPageRequest,
+ RelatedQnAPageRequestEmbeddingModel,
+ RelatedQnAPageRequestSelectedModel,
+ RelatedQnAPageResponse,
+ RelatedQnAPageStatusResponse,
+ ReplyButton,
+ ResponseModel,
+ ResponseModelFinalKeywordQuery,
+ ResponseModelFinalPrompt,
+ Role,
+ RunSettings,
+ RunSettingsRetentionPolicy,
+ RunStart,
+ SadTalkerSettings,
+ Scheduler,
+ SearchReference,
+ SeoSummaryPageOutput,
+ SeoSummaryPageRequest,
+ SeoSummaryPageRequestSelectedModel,
+ SeoSummaryPageResponse,
+ SeoSummaryPageStatusResponse,
+ SerpSearchLocation,
+ SerpSearchType,
+ SmartGptPageOutput,
+ SmartGptPageRequest,
+ SmartGptPageRequestSelectedModel,
+ SmartGptPageResponse,
+ SmartGptPageStatusResponse,
+ SocialLookupEmailPageOutput,
+ SocialLookupEmailPageRequest,
+ SocialLookupEmailPageRequestSelectedModel,
+ SocialLookupEmailPageResponse,
+ SocialLookupEmailPageStatusResponse,
+ StreamError,
+ Text2AudioPageOutput,
+ Text2AudioPageRequest,
+ Text2AudioPageResponse,
+ Text2AudioPageStatusResponse,
+ TextToSpeechPageOutput,
+ TextToSpeechPageRequest,
+ TextToSpeechPageRequestOpenaiTtsModel,
+ TextToSpeechPageRequestOpenaiVoiceName,
+ TextToSpeechPageRequestTtsProvider,
+ TextToSpeechPageResponse,
+ TextToSpeechPageStatusResponse,
+ TrainingDataModel,
+ TranslationPageOutput,
+ TranslationPageRequest,
+ TranslationPageRequestSelectedModel,
+ TranslationPageResponse,
+ TranslationPageStatusResponse,
+ Trigger,
+ ValidationError,
+ ValidationErrorLocItem,
+ Vcard,
+ VideoBotsPageOutput,
+ VideoBotsPageOutputFinalKeywordQuery,
+ VideoBotsPageOutputFinalPrompt,
+ VideoBotsPageRequest,
+ VideoBotsPageRequestAsrModel,
+ VideoBotsPageRequestCitationStyle,
+ VideoBotsPageRequestEmbeddingModel,
+ VideoBotsPageRequestLipsyncModel,
+ VideoBotsPageRequestOpenaiTtsModel,
+ VideoBotsPageRequestOpenaiVoiceName,
+ VideoBotsPageRequestSelectedModel,
+ VideoBotsPageRequestTranslationModel,
+ VideoBotsPageRequestTtsProvider,
+ VideoBotsPageResponse,
+ VideoBotsPageStatusResponse,
+)
+from .errors import InternalServerError, PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError
+from . import (
+ ai_animation_generator,
+ ai_art_qr_code,
+ ai_background_changer,
+ ai_generated_photo_from_email_profile_lookup,
+ ai_image_with_a_face,
+ bulk_runner,
+ chyron_plant_bot,
+ compare_ai_image_generators,
+ compare_ai_image_upscalers,
+ compare_ai_translations,
+ compare_ai_voice_generators,
+ copilot_for_your_enterprise,
+ copilot_integrations,
+ create_a_perfect_seo_optimized_title_paragraph,
+ edit_an_image_with_ai_prompt,
+ embeddings,
+ evaluator,
+ functions,
+ generate_people_also_ask_seo_content,
+ generate_product_photo_backgrounds,
+ large_language_models_gpt3,
+ letter_writer,
+ lip_syncing,
+ lipsync_video_with_any_text,
+ misc,
+ people_also_ask_answers_from_a_doc,
+ profile_lookup_gpt3for_ai_personalized_emails,
+ render_image_search_results_with_ai,
+ search_your_docs_with_gpt,
+ smart_gpt,
+ speech_recognition_translation,
+ summarize_your_docs_with_gpt,
+ synthetic_data_maker_for_videos_pd_fs,
+ text_guided_audio_generator,
+ web_search_gpt3,
+)
+from .copilot_integrations import (
+ CreateStreamRequestAsrModel,
+ CreateStreamRequestCitationStyle,
+ CreateStreamRequestEmbeddingModel,
+ CreateStreamRequestLipsyncModel,
+ CreateStreamRequestOpenaiTtsModel,
+ CreateStreamRequestOpenaiVoiceName,
+ CreateStreamRequestSelectedModel,
+ CreateStreamRequestTranslationModel,
+ CreateStreamRequestTtsProvider,
+ VideoBotsStreamResponse,
+)
+from .environment import GooeyEnvironment
+from .version import __version__
+
+__all__ = [
+ "AggFunction",
+ "AggFunctionResult",
+ "AnimationPrompt",
+ "AsrChunk",
+ "AsrOutputJson",
+ "AsrPageOutput",
+ "AsrPageOutputOutputTextItem",
+ "AsrPageRequest",
+ "AsrPageRequestOutputFormat",
+ "AsrPageRequestSelectedModel",
+ "AsrPageRequestTranslationModel",
+ "AsrPageResponse",
+ "AsrPageStatusResponse",
+ "AsyncApiResponseModelV3",
+ "BalanceResponse",
+ "BotBroadcastFilters",
+ "BulkEvalPageOutput",
+ "BulkEvalPageRequest",
+ "BulkEvalPageRequestSelectedModel",
+ "BulkEvalPageResponse",
+ "BulkEvalPageStatusResponse",
+ "BulkRunnerPageOutput",
+ "BulkRunnerPageRequest",
+ "BulkRunnerPageResponse",
+ "BulkRunnerPageStatusResponse",
+ "ButtonPressed",
+ "CalledFunctionResponse",
+ "ChatCompletionContentPartImageParam",
+ "ChatCompletionContentPartTextParam",
+ "ChyronPlantPageOutput",
+ "ChyronPlantPageRequest",
+ "ChyronPlantPageResponse",
+ "ChyronPlantPageStatusResponse",
+ "CompareLlmPageOutput",
+ "CompareLlmPageRequest",
+ "CompareLlmPageRequestResponseFormatType",
+ "CompareLlmPageRequestSelectedModelsItem",
+ "CompareLlmPageResponse",
+ "CompareLlmPageStatusResponse",
+ "CompareText2ImgPageOutput",
+ "CompareText2ImgPageRequest",
+ "CompareText2ImgPageRequestSelectedModelsItem",
+ "CompareText2ImgPageResponse",
+ "CompareText2ImgPageStatusResponse",
+ "CompareUpscalerPageOutput",
+ "CompareUpscalerPageRequest",
+ "CompareUpscalerPageRequestSelectedModelsItem",
+ "CompareUpscalerPageResponse",
+ "CompareUpscalerPageStatusResponse",
+ "ConsoleLogs",
+ "Content",
+ "ConversationEntry",
+ "ConversationEntryContentItem",
+ "ConversationEntryContentItem_ImageUrl",
+ "ConversationEntryContentItem_Text",
+ "ConversationStart",
+ "CreateStreamRequestAsrModel",
+ "CreateStreamRequestCitationStyle",
+ "CreateStreamRequestEmbeddingModel",
+ "CreateStreamRequestLipsyncModel",
+ "CreateStreamRequestOpenaiTtsModel",
+ "CreateStreamRequestOpenaiVoiceName",
+ "CreateStreamRequestSelectedModel",
+ "CreateStreamRequestTranslationModel",
+ "CreateStreamRequestTtsProvider",
+ "CreateStreamResponse",
+ "DeforumSdPageOutput",
+ "DeforumSdPageRequest",
+ "DeforumSdPageRequestSelectedModel",
+ "DeforumSdPageResponse",
+ "DeforumSdPageStatusResponse",
+ "Detail",
+ "DocExtractPageOutput",
+ "DocExtractPageRequest",
+ "DocExtractPageRequestSelectedAsrModel",
+ "DocExtractPageRequestSelectedModel",
+ "DocExtractPageResponse",
+ "DocExtractPageStatusResponse",
+ "DocSearchPageOutput",
+ "DocSearchPageRequest",
+ "DocSearchPageRequestCitationStyle",
+ "DocSearchPageRequestEmbeddingModel",
+ "DocSearchPageRequestKeywordQuery",
+ "DocSearchPageRequestSelectedModel",
+ "DocSearchPageResponse",
+ "DocSearchPageStatusResponse",
+ "DocSummaryPageOutput",
+ "DocSummaryPageRequest",
+ "DocSummaryPageRequestSelectedAsrModel",
+ "DocSummaryPageRequestSelectedModel",
+ "DocSummaryPageResponse",
+ "DocSummaryPageStatusResponse",
+ "EmailFaceInpaintingPageOutput",
+ "EmailFaceInpaintingPageRequest",
+ "EmailFaceInpaintingPageRequestSelectedModel",
+ "EmailFaceInpaintingPageResponse",
+ "EmailFaceInpaintingPageStatusResponse",
+ "EmbeddingsPageOutput",
+ "EmbeddingsPageRequest",
+ "EmbeddingsPageRequestSelectedModel",
+ "EmbeddingsPageResponse",
+ "EmbeddingsPageStatusResponse",
+ "EvalPrompt",
+ "FaceInpaintingPageOutput",
+ "FaceInpaintingPageRequest",
+ "FaceInpaintingPageRequestSelectedModel",
+ "FaceInpaintingPageResponse",
+ "FaceInpaintingPageStatusResponse",
+ "FailedReponseModelV2",
+ "FailedResponseDetail",
+ "FinalResponse",
+ "Function",
+ "FunctionsPageOutput",
+ "FunctionsPageRequest",
+ "FunctionsPageResponse",
+ "FunctionsPageStatusResponse",
+ "GenericErrorResponse",
+ "GenericErrorResponseDetail",
+ "GooeyEnvironment",
+ "GoogleGptPageOutput",
+ "GoogleGptPageRequest",
+ "GoogleGptPageRequestEmbeddingModel",
+ "GoogleGptPageRequestSelectedModel",
+ "GoogleGptPageResponse",
+ "GoogleGptPageStatusResponse",
+ "GoogleImageGenPageOutput",
+ "GoogleImageGenPageRequest",
+ "GoogleImageGenPageRequestSelectedModel",
+ "GoogleImageGenPageResponse",
+ "GoogleImageGenPageStatusResponse",
+ "HttpValidationError",
+ "ImageSegmentationPageOutput",
+ "ImageSegmentationPageRequest",
+ "ImageSegmentationPageRequestSelectedModel",
+ "ImageSegmentationPageResponse",
+ "ImageSegmentationPageStatusResponse",
+ "ImageUrl",
+ "Img2ImgPageOutput",
+ "Img2ImgPageRequest",
+ "Img2ImgPageRequestSelectedControlnetModel",
+ "Img2ImgPageRequestSelectedControlnetModelItem",
+ "Img2ImgPageRequestSelectedModel",
+ "Img2ImgPageResponse",
+ "Img2ImgPageStatusResponse",
+ "InternalServerError",
+ "LetterWriterPageOutput",
+ "LetterWriterPageRequest",
+ "LetterWriterPageResponse",
+ "LetterWriterPageStatusResponse",
+ "Level",
+ "LipsyncPageOutput",
+ "LipsyncPageRequest",
+ "LipsyncPageRequestSelectedModel",
+ "LipsyncPageResponse",
+ "LipsyncPageStatusResponse",
+ "LipsyncTtsPageOutput",
+ "LipsyncTtsPageRequest",
+ "LipsyncTtsPageRequestOpenaiTtsModel",
+ "LipsyncTtsPageRequestOpenaiVoiceName",
+ "LipsyncTtsPageRequestSelectedModel",
+ "LipsyncTtsPageRequestTtsProvider",
+ "LipsyncTtsPageResponse",
+ "LipsyncTtsPageStatusResponse",
+ "LlmTools",
+ "MessagePart",
+ "ObjectInpaintingPageOutput",
+ "ObjectInpaintingPageRequest",
+ "ObjectInpaintingPageRequestSelectedModel",
+ "ObjectInpaintingPageResponse",
+ "ObjectInpaintingPageStatusResponse",
+ "PaymentRequiredError",
+ "Preprocess",
+ "Prompt",
+ "PromptTreeNode",
+ "QrCodeGeneratorPageOutput",
+ "QrCodeGeneratorPageRequest",
+ "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
+ "QrCodeGeneratorPageRequestSelectedControlnetModelItem",
+ "QrCodeGeneratorPageRequestSelectedModel",
+ "QrCodeGeneratorPageResponse",
+ "QrCodeGeneratorPageStatusResponse",
+ "RecipeFunction",
+ "RecipeRunState",
+ "RelatedDocSearchResponse",
+ "RelatedGoogleGptResponse",
+ "RelatedQnADocPageOutput",
+ "RelatedQnADocPageRequest",
+ "RelatedQnADocPageRequestCitationStyle",
+ "RelatedQnADocPageRequestEmbeddingModel",
+ "RelatedQnADocPageRequestKeywordQuery",
+ "RelatedQnADocPageRequestSelectedModel",
+ "RelatedQnADocPageResponse",
+ "RelatedQnADocPageStatusResponse",
+ "RelatedQnAPageOutput",
+ "RelatedQnAPageRequest",
+ "RelatedQnAPageRequestEmbeddingModel",
+ "RelatedQnAPageRequestSelectedModel",
+ "RelatedQnAPageResponse",
+ "RelatedQnAPageStatusResponse",
+ "ReplyButton",
+ "ResponseModel",
+ "ResponseModelFinalKeywordQuery",
+ "ResponseModelFinalPrompt",
+ "Role",
+ "RunSettings",
+ "RunSettingsRetentionPolicy",
+ "RunStart",
+ "SadTalkerSettings",
+ "Scheduler",
+ "SearchReference",
+ "SeoSummaryPageOutput",
+ "SeoSummaryPageRequest",
+ "SeoSummaryPageRequestSelectedModel",
+ "SeoSummaryPageResponse",
+ "SeoSummaryPageStatusResponse",
+ "SerpSearchLocation",
+ "SerpSearchType",
+ "SmartGptPageOutput",
+ "SmartGptPageRequest",
+ "SmartGptPageRequestSelectedModel",
+ "SmartGptPageResponse",
+ "SmartGptPageStatusResponse",
+ "SocialLookupEmailPageOutput",
+ "SocialLookupEmailPageRequest",
+ "SocialLookupEmailPageRequestSelectedModel",
+ "SocialLookupEmailPageResponse",
+ "SocialLookupEmailPageStatusResponse",
+ "StreamError",
+ "Text2AudioPageOutput",
+ "Text2AudioPageRequest",
+ "Text2AudioPageResponse",
+ "Text2AudioPageStatusResponse",
+ "TextToSpeechPageOutput",
+ "TextToSpeechPageRequest",
+ "TextToSpeechPageRequestOpenaiTtsModel",
+ "TextToSpeechPageRequestOpenaiVoiceName",
+ "TextToSpeechPageRequestTtsProvider",
+ "TextToSpeechPageResponse",
+ "TextToSpeechPageStatusResponse",
+ "TooManyRequestsError",
+ "TrainingDataModel",
+ "TranslationPageOutput",
+ "TranslationPageRequest",
+ "TranslationPageRequestSelectedModel",
+ "TranslationPageResponse",
+ "TranslationPageStatusResponse",
+ "Trigger",
+ "UnprocessableEntityError",
+ "ValidationError",
+ "ValidationErrorLocItem",
+ "Vcard",
+ "VideoBotsPageOutput",
+ "VideoBotsPageOutputFinalKeywordQuery",
+ "VideoBotsPageOutputFinalPrompt",
+ "VideoBotsPageRequest",
+ "VideoBotsPageRequestAsrModel",
+ "VideoBotsPageRequestCitationStyle",
+ "VideoBotsPageRequestEmbeddingModel",
+ "VideoBotsPageRequestLipsyncModel",
+ "VideoBotsPageRequestOpenaiTtsModel",
+ "VideoBotsPageRequestOpenaiVoiceName",
+ "VideoBotsPageRequestSelectedModel",
+ "VideoBotsPageRequestTranslationModel",
+ "VideoBotsPageRequestTtsProvider",
+ "VideoBotsPageResponse",
+ "VideoBotsPageStatusResponse",
+ "VideoBotsStreamResponse",
+ "__version__",
+ "ai_animation_generator",
+ "ai_art_qr_code",
+ "ai_background_changer",
+ "ai_generated_photo_from_email_profile_lookup",
+ "ai_image_with_a_face",
+ "bulk_runner",
+ "chyron_plant_bot",
+ "compare_ai_image_generators",
+ "compare_ai_image_upscalers",
+ "compare_ai_translations",
+ "compare_ai_voice_generators",
+ "copilot_for_your_enterprise",
+ "copilot_integrations",
+ "create_a_perfect_seo_optimized_title_paragraph",
+ "edit_an_image_with_ai_prompt",
+ "embeddings",
+ "evaluator",
+ "functions",
+ "generate_people_also_ask_seo_content",
+ "generate_product_photo_backgrounds",
+ "large_language_models_gpt3",
+ "letter_writer",
+ "lip_syncing",
+ "lipsync_video_with_any_text",
+ "misc",
+ "people_also_ask_answers_from_a_doc",
+ "profile_lookup_gpt3for_ai_personalized_emails",
+ "render_image_search_results_with_ai",
+ "search_your_docs_with_gpt",
+ "smart_gpt",
+ "speech_recognition_translation",
+ "summarize_your_docs_with_gpt",
+ "synthetic_data_maker_for_videos_pd_fs",
+ "text_guided_audio_generator",
+ "web_search_gpt3",
+]
diff --git a/src/gooey/ai_animation_generator/__init__.py b/src/gooey/ai_animation_generator/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/ai_animation_generator/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/ai_animation_generator/client.py b/src/gooey/ai_animation_generator/client.py
new file mode 100644
index 0000000..04762e1
--- /dev/null
+++ b/src/gooey/ai_animation_generator/client.py
@@ -0,0 +1,644 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.animation_prompt import AnimationPrompt
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
+from ..types.deforum_sd_page_response import DeforumSdPageResponse
+from ..types.deforum_sd_page_status_response import DeforumSdPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AiAnimationGeneratorClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def deforum_sd(
+        self,
+        *,
+        animation_prompts: typing.Sequence[AnimationPrompt],
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        max_frames: typing.Optional[int] = OMIT,
+        selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT,
+        animation_mode: typing.Optional[str] = OMIT,
+        zoom: typing.Optional[str] = OMIT,
+        translation_x: typing.Optional[str] = OMIT,
+        translation_y: typing.Optional[str] = OMIT,
+        rotation3d_x: typing.Optional[str] = OMIT,
+        rotation3d_y: typing.Optional[str] = OMIT,
+        rotation3d_z: typing.Optional[str] = OMIT,
+        fps: typing.Optional[int] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> DeforumSdPageResponse:
+        """Run AI Animation Generator synchronously: POST ``v2/DeforumSD/`` and parse the body as ``DeforumSdPageResponse``.
+        Parameters
+        ----------
+        animation_prompts : typing.Sequence[AnimationPrompt]
+            Keyframe prompts, sent as ``animation_prompts`` in the request body.
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        max_frames : typing.Optional[int]
+
+        selected_model : typing.Optional[DeforumSdPageRequestSelectedModel]
+
+        animation_mode : typing.Optional[str]
+
+        zoom : typing.Optional[str]
+
+        translation_x : typing.Optional[str]
+
+        translation_y : typing.Optional[str]
+
+        rotation3d_x : typing.Optional[str]
+
+        rotation3d_y : typing.Optional[str]
+
+        rotation3d_z : typing.Optional[str]
+
+        fps : typing.Optional[int]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DeforumSdPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey import AnimationPrompt
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.ai_animation_generator.deforum_sd(
+            animation_prompts=[
+                AnimationPrompt(
+                    frame="frame",
+                    prompt="prompt",
+                )
+            ],
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/DeforumSD/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "animation_prompts": animation_prompts,
+                "max_frames": max_frames,
+                "selected_model": selected_model,
+                "animation_mode": animation_mode,
+                "zoom": zoom,
+                "translation_x": translation_x,
+                "translation_y": translation_y,
+                "rotation_3d_x": rotation3d_x,  # SDK arg rotation3d_x maps to API field rotation_3d_x (same for y/z below)
+                "rotation_3d_y": rotation3d_y,
+                "rotation_3d_z": rotation3d_z,
+                "fps": fps,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DeforumSdPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # may raise JSONDecodeError on a non-JSON error body
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_deforum_sd(
+        self,
+        *,
+        animation_prompts: typing.Sequence[AnimationPrompt],
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        max_frames: typing.Optional[int] = OMIT,
+        selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT,
+        animation_mode: typing.Optional[str] = OMIT,
+        zoom: typing.Optional[str] = OMIT,
+        translation_x: typing.Optional[str] = OMIT,
+        translation_y: typing.Optional[str] = OMIT,
+        rotation3d_x: typing.Optional[str] = OMIT,
+        rotation3d_y: typing.Optional[str] = OMIT,
+        rotation3d_z: typing.Optional[str] = OMIT,
+        fps: typing.Optional[int] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """Queue a DeforumSD run server-side: POST ``v3/DeforumSD/async/`` and parse the body as ``AsyncApiResponseModelV3``.
+        Parameters
+        ----------
+        animation_prompts : typing.Sequence[AnimationPrompt]
+            Keyframe prompts, sent as ``animation_prompts`` in the request body.
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        max_frames : typing.Optional[int]
+
+        selected_model : typing.Optional[DeforumSdPageRequestSelectedModel]
+
+        animation_mode : typing.Optional[str]
+
+        zoom : typing.Optional[str]
+
+        translation_x : typing.Optional[str]
+
+        translation_y : typing.Optional[str]
+
+        rotation3d_x : typing.Optional[str]
+
+        rotation3d_y : typing.Optional[str]
+
+        rotation3d_z : typing.Optional[str]
+
+        fps : typing.Optional[int]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey import AnimationPrompt
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.ai_animation_generator.async_deforum_sd(
+            animation_prompts=[
+                AnimationPrompt(
+                    frame="frame",
+                    prompt="prompt",
+                )
+            ],
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/DeforumSD/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "animation_prompts": animation_prompts,
+                "max_frames": max_frames,
+                "selected_model": selected_model,
+                "animation_mode": animation_mode,
+                "zoom": zoom,
+                "translation_x": translation_x,
+                "translation_y": translation_y,
+                "rotation_3d_x": rotation3d_x,  # SDK arg rotation3d_x maps to API field rotation_3d_x (same for y/z below)
+                "rotation_3d_y": rotation3d_y,
+                "rotation_3d_z": rotation3d_z,
+                "fps": fps,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # may raise JSONDecodeError on a non-JSON error body
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def status_deforum_sd(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> DeforumSdPageStatusResponse:
+        """Poll a DeforumSD run: GET ``v3/DeforumSD/status/`` and parse the body as ``DeforumSdPageStatusResponse``.
+        Parameters
+        ----------
+        run_id : str
+            Identifier of the run to poll, passed as the ``run_id`` query parameter.
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DeforumSdPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.ai_animation_generator.status_deforum_sd(
+            run_id="run_id",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/DeforumSD/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DeforumSdPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # may raise JSONDecodeError on a non-JSON error body
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAiAnimationGeneratorClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def deforum_sd(
+        self,
+        *,
+        animation_prompts: typing.Sequence[AnimationPrompt],
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        max_frames: typing.Optional[int] = OMIT,
+        selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT,
+        animation_mode: typing.Optional[str] = OMIT,
+        zoom: typing.Optional[str] = OMIT,
+        translation_x: typing.Optional[str] = OMIT,
+        translation_y: typing.Optional[str] = OMIT,
+        rotation3d_x: typing.Optional[str] = OMIT,
+        rotation3d_y: typing.Optional[str] = OMIT,
+        rotation3d_z: typing.Optional[str] = OMIT,
+        fps: typing.Optional[int] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> DeforumSdPageResponse:
+        """Awaitable AI Animation Generator call: POST ``v2/DeforumSD/`` and parse the body as ``DeforumSdPageResponse``.
+        Parameters
+        ----------
+        animation_prompts : typing.Sequence[AnimationPrompt]
+            Keyframe prompts, sent as ``animation_prompts`` in the request body.
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        max_frames : typing.Optional[int]
+
+        selected_model : typing.Optional[DeforumSdPageRequestSelectedModel]
+
+        animation_mode : typing.Optional[str]
+
+        zoom : typing.Optional[str]
+
+        translation_x : typing.Optional[str]
+
+        translation_y : typing.Optional[str]
+
+        rotation3d_x : typing.Optional[str]
+
+        rotation3d_y : typing.Optional[str]
+
+        rotation3d_z : typing.Optional[str]
+
+        fps : typing.Optional[int]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DeforumSdPageResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AnimationPrompt
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.ai_animation_generator.deforum_sd(
+                animation_prompts=[
+                    AnimationPrompt(
+                        frame="frame",
+                        prompt="prompt",
+                    )
+                ],
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v2/DeforumSD/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "animation_prompts": animation_prompts,
+                "max_frames": max_frames,
+                "selected_model": selected_model,
+                "animation_mode": animation_mode,
+                "zoom": zoom,
+                "translation_x": translation_x,
+                "translation_y": translation_y,
+                "rotation_3d_x": rotation3d_x,  # SDK arg rotation3d_x maps to API field rotation_3d_x (same for y/z below)
+                "rotation_3d_y": rotation3d_y,
+                "rotation_3d_z": rotation3d_z,
+                "fps": fps,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DeforumSdPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # may raise JSONDecodeError on a non-JSON error body
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_deforum_sd(
+        self,
+        *,
+        animation_prompts: typing.Sequence[AnimationPrompt],
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        max_frames: typing.Optional[int] = OMIT,
+        selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT,
+        animation_mode: typing.Optional[str] = OMIT,
+        zoom: typing.Optional[str] = OMIT,
+        translation_x: typing.Optional[str] = OMIT,
+        translation_y: typing.Optional[str] = OMIT,
+        rotation3d_x: typing.Optional[str] = OMIT,
+        rotation3d_y: typing.Optional[str] = OMIT,
+        rotation3d_z: typing.Optional[str] = OMIT,
+        fps: typing.Optional[int] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """Queue a DeforumSD run server-side: POST ``v3/DeforumSD/async/`` and parse the body as ``AsyncApiResponseModelV3``.
+        Parameters
+        ----------
+        animation_prompts : typing.Sequence[AnimationPrompt]
+            Keyframe prompts, sent as ``animation_prompts`` in the request body.
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        max_frames : typing.Optional[int]
+
+        selected_model : typing.Optional[DeforumSdPageRequestSelectedModel]
+
+        animation_mode : typing.Optional[str]
+
+        zoom : typing.Optional[str]
+
+        translation_x : typing.Optional[str]
+
+        translation_y : typing.Optional[str]
+
+        rotation3d_x : typing.Optional[str]
+
+        rotation3d_y : typing.Optional[str]
+
+        rotation3d_z : typing.Optional[str]
+
+        fps : typing.Optional[int]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey import AnimationPrompt
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.ai_animation_generator.async_deforum_sd(
+                animation_prompts=[
+                    AnimationPrompt(
+                        frame="frame",
+                        prompt="prompt",
+                    )
+                ],
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/DeforumSD/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "animation_prompts": animation_prompts,
+                "max_frames": max_frames,
+                "selected_model": selected_model,
+                "animation_mode": animation_mode,
+                "zoom": zoom,
+                "translation_x": translation_x,
+                "translation_y": translation_y,
+                "rotation_3d_x": rotation3d_x,  # SDK arg rotation3d_x maps to API field rotation_3d_x (same for y/z below)
+                "rotation_3d_y": rotation3d_y,
+                "rotation_3d_z": rotation3d_z,
+                "fps": fps,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # may raise JSONDecodeError on a non-JSON error body
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_deforum_sd(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> DeforumSdPageStatusResponse:
+        """Poll a DeforumSD run: GET ``v3/DeforumSD/status/`` and parse the body as ``DeforumSdPageStatusResponse``.
+        Parameters
+        ----------
+        run_id : str
+            Identifier of the run to poll, passed as the ``run_id`` query parameter.
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DeforumSdPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.ai_animation_generator.status_deforum_sd(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/DeforumSD/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DeforumSdPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # may raise JSONDecodeError on a non-JSON error body
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/ai_art_qr_code/__init__.py b/src/gooey/ai_art_qr_code/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/ai_art_qr_code/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/ai_art_qr_code/client.py b/src/gooey/ai_art_qr_code/client.py
new file mode 100644
index 0000000..c593a8d
--- /dev/null
+++ b/src/gooey/ai_art_qr_code/client.py
@@ -0,0 +1,867 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.qr_code_generator_page_request_image_prompt_controlnet_models_item import (
+ QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
+)
+from ..types.qr_code_generator_page_request_selected_controlnet_model_item import (
+ QrCodeGeneratorPageRequestSelectedControlnetModelItem,
+)
+from ..types.qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
+from ..types.qr_code_generator_page_response import QrCodeGeneratorPageResponse
+from ..types.qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.scheduler import Scheduler
+from ..types.vcard import Vcard
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AiArtQrCodeClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+    def art_qr_code(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        qr_code_data: typing.Optional[str] = OMIT,
+        qr_code_input_image: typing.Optional[str] = OMIT,
+        qr_code_vcard: typing.Optional[Vcard] = OMIT,
+        qr_code_file: typing.Optional[str] = OMIT,
+        use_url_shortener: typing.Optional[bool] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        image_prompt: typing.Optional[str] = OMIT,
+        image_prompt_controlnet_models: typing.Optional[
+            typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+        ] = OMIT,
+        image_prompt_strength: typing.Optional[float] = OMIT,
+        image_prompt_scale: typing.Optional[float] = OMIT,
+        image_prompt_pos_x: typing.Optional[float] = OMIT,
+        image_prompt_pos_y: typing.Optional[float] = OMIT,
+        selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
+        selected_controlnet_model: typing.Optional[
+            typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+        ] = OMIT,
+        output_width: typing.Optional[int] = OMIT,
+        output_height: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        scheduler: typing.Optional[Scheduler] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        obj_scale: typing.Optional[float] = OMIT,
+        obj_pos_x: typing.Optional[float] = OMIT,
+        obj_pos_y: typing.Optional[float] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> QrCodeGeneratorPageResponse:
+        """Run AI Art QR Code synchronously: POST ``v2/art-qr-code/`` and parse the body as ``QrCodeGeneratorPageResponse``.
+        Parameters
+        ----------
+        text_prompt : str
+            Prompt text, sent as ``text_prompt`` in the request body.
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        qr_code_data : typing.Optional[str]
+
+        qr_code_input_image : typing.Optional[str]
+
+        qr_code_vcard : typing.Optional[Vcard]
+
+        qr_code_file : typing.Optional[str]
+
+        use_url_shortener : typing.Optional[bool]
+
+        negative_prompt : typing.Optional[str]
+
+        image_prompt : typing.Optional[str]
+
+        image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]
+
+        image_prompt_strength : typing.Optional[float]
+
+        image_prompt_scale : typing.Optional[float]
+
+        image_prompt_pos_x : typing.Optional[float]
+
+        image_prompt_pos_y : typing.Optional[float]
+
+        selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]
+
+        selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]
+
+        output_width : typing.Optional[int]
+
+        output_height : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        scheduler : typing.Optional[Scheduler]
+
+        seed : typing.Optional[int]
+
+        obj_scale : typing.Optional[float]
+
+        obj_pos_x : typing.Optional[float]
+
+        obj_pos_y : typing.Optional[float]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        QrCodeGeneratorPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.ai_art_qr_code.art_qr_code(
+            text_prompt="text_prompt",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/art-qr-code/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "qr_code_data": qr_code_data,
+                "qr_code_input_image": qr_code_input_image,
+                "qr_code_vcard": qr_code_vcard,
+                "qr_code_file": qr_code_file,
+                "use_url_shortener": use_url_shortener,
+                "text_prompt": text_prompt,
+                "negative_prompt": negative_prompt,
+                "image_prompt": image_prompt,
+                "image_prompt_controlnet_models": image_prompt_controlnet_models,
+                "image_prompt_strength": image_prompt_strength,
+                "image_prompt_scale": image_prompt_scale,
+                "image_prompt_pos_x": image_prompt_pos_x,
+                "image_prompt_pos_y": image_prompt_pos_y,
+                "selected_model": selected_model,
+                "selected_controlnet_model": selected_controlnet_model,
+                "output_width": output_width,
+                "output_height": output_height,
+                "guidance_scale": guidance_scale,
+                "controlnet_conditioning_scale": controlnet_conditioning_scale,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "scheduler": scheduler,
+                "seed": seed,
+                "obj_scale": obj_scale,
+                "obj_pos_x": obj_pos_x,
+                "obj_pos_y": obj_pos_y,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(QrCodeGeneratorPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # may raise JSONDecodeError on a non-JSON error body
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_art_qr_code(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        qr_code_data: typing.Optional[str] = OMIT,
+        qr_code_input_image: typing.Optional[str] = OMIT,
+        qr_code_vcard: typing.Optional[Vcard] = OMIT,
+        qr_code_file: typing.Optional[str] = OMIT,
+        use_url_shortener: typing.Optional[bool] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        image_prompt: typing.Optional[str] = OMIT,
+        image_prompt_controlnet_models: typing.Optional[
+            typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+        ] = OMIT,
+        image_prompt_strength: typing.Optional[float] = OMIT,
+        image_prompt_scale: typing.Optional[float] = OMIT,
+        image_prompt_pos_x: typing.Optional[float] = OMIT,
+        image_prompt_pos_y: typing.Optional[float] = OMIT,
+        selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
+        selected_controlnet_model: typing.Optional[
+            typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+        ] = OMIT,
+        output_width: typing.Optional[int] = OMIT,
+        output_height: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        scheduler: typing.Optional[Scheduler] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        obj_scale: typing.Optional[float] = OMIT,
+        obj_pos_x: typing.Optional[float] = OMIT,
+        obj_pos_y: typing.Optional[float] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """Queue an art QR code run server-side: POST ``v3/art-qr-code/async/`` and parse the body as ``AsyncApiResponseModelV3``.
+        Parameters
+        ----------
+        text_prompt : str
+            Prompt text, sent as ``text_prompt`` in the request body.
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        qr_code_data : typing.Optional[str]
+
+        qr_code_input_image : typing.Optional[str]
+
+        qr_code_vcard : typing.Optional[Vcard]
+
+        qr_code_file : typing.Optional[str]
+
+        use_url_shortener : typing.Optional[bool]
+
+        negative_prompt : typing.Optional[str]
+
+        image_prompt : typing.Optional[str]
+
+        image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]
+
+        image_prompt_strength : typing.Optional[float]
+
+        image_prompt_scale : typing.Optional[float]
+
+        image_prompt_pos_x : typing.Optional[float]
+
+        image_prompt_pos_y : typing.Optional[float]
+
+        selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]
+
+        selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]
+
+        output_width : typing.Optional[int]
+
+        output_height : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        scheduler : typing.Optional[Scheduler]
+
+        seed : typing.Optional[int]
+
+        obj_scale : typing.Optional[float]
+
+        obj_pos_x : typing.Optional[float]
+
+        obj_pos_y : typing.Optional[float]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.ai_art_qr_code.async_art_qr_code(
+            text_prompt="text_prompt",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/art-qr-code/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "qr_code_data": qr_code_data,
+                "qr_code_input_image": qr_code_input_image,
+                "qr_code_vcard": qr_code_vcard,
+                "qr_code_file": qr_code_file,
+                "use_url_shortener": use_url_shortener,
+                "text_prompt": text_prompt,
+                "negative_prompt": negative_prompt,
+                "image_prompt": image_prompt,
+                "image_prompt_controlnet_models": image_prompt_controlnet_models,
+                "image_prompt_strength": image_prompt_strength,
+                "image_prompt_scale": image_prompt_scale,
+                "image_prompt_pos_x": image_prompt_pos_x,
+                "image_prompt_pos_y": image_prompt_pos_y,
+                "selected_model": selected_model,
+                "selected_controlnet_model": selected_controlnet_model,
+                "output_width": output_width,
+                "output_height": output_height,
+                "guidance_scale": guidance_scale,
+                "controlnet_conditioning_scale": controlnet_conditioning_scale,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "scheduler": scheduler,
+                "seed": seed,
+                "obj_scale": obj_scale,
+                "obj_pos_x": obj_pos_x,
+                "obj_pos_y": obj_pos_y,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # may raise JSONDecodeError on a non-JSON error body
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def status_art_qr_code(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> QrCodeGeneratorPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ QrCodeGeneratorPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.ai_art_qr_code.status_art_qr_code(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/art-qr-code/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(QrCodeGeneratorPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAiArtQrCodeClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def art_qr_code(
+ self,
+ *,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ qr_code_data: typing.Optional[str] = OMIT,
+ qr_code_input_image: typing.Optional[str] = OMIT,
+ qr_code_vcard: typing.Optional[Vcard] = OMIT,
+ qr_code_file: typing.Optional[str] = OMIT,
+ use_url_shortener: typing.Optional[bool] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ image_prompt: typing.Optional[str] = OMIT,
+ image_prompt_controlnet_models: typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+ ] = OMIT,
+ image_prompt_strength: typing.Optional[float] = OMIT,
+ image_prompt_scale: typing.Optional[float] = OMIT,
+ image_prompt_pos_x: typing.Optional[float] = OMIT,
+ image_prompt_pos_y: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
+ selected_controlnet_model: typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+ ] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ scheduler: typing.Optional[Scheduler] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> QrCodeGeneratorPageResponse:
+ """
+ Parameters
+ ----------
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ qr_code_data : typing.Optional[str]
+
+ qr_code_input_image : typing.Optional[str]
+
+ qr_code_vcard : typing.Optional[Vcard]
+
+ qr_code_file : typing.Optional[str]
+
+ use_url_shortener : typing.Optional[bool]
+
+ negative_prompt : typing.Optional[str]
+
+ image_prompt : typing.Optional[str]
+
+ image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]
+
+ image_prompt_strength : typing.Optional[float]
+
+ image_prompt_scale : typing.Optional[float]
+
+ image_prompt_pos_x : typing.Optional[float]
+
+ image_prompt_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]
+
+ selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ scheduler : typing.Optional[Scheduler]
+
+ seed : typing.Optional[int]
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ QrCodeGeneratorPageResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.ai_art_qr_code.art_qr_code(
+ text_prompt="text_prompt",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v2/art-qr-code/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "qr_code_data": qr_code_data,
+ "qr_code_input_image": qr_code_input_image,
+ "qr_code_vcard": qr_code_vcard,
+ "qr_code_file": qr_code_file,
+ "use_url_shortener": use_url_shortener,
+ "text_prompt": text_prompt,
+ "negative_prompt": negative_prompt,
+ "image_prompt": image_prompt,
+ "image_prompt_controlnet_models": image_prompt_controlnet_models,
+ "image_prompt_strength": image_prompt_strength,
+ "image_prompt_scale": image_prompt_scale,
+ "image_prompt_pos_x": image_prompt_pos_x,
+ "image_prompt_pos_y": image_prompt_pos_y,
+ "selected_model": selected_model,
+ "selected_controlnet_model": selected_controlnet_model,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "controlnet_conditioning_scale": controlnet_conditioning_scale,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "scheduler": scheduler,
+ "seed": seed,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(QrCodeGeneratorPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def async_art_qr_code(
+ self,
+ *,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ qr_code_data: typing.Optional[str] = OMIT,
+ qr_code_input_image: typing.Optional[str] = OMIT,
+ qr_code_vcard: typing.Optional[Vcard] = OMIT,
+ qr_code_file: typing.Optional[str] = OMIT,
+ use_url_shortener: typing.Optional[bool] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ image_prompt: typing.Optional[str] = OMIT,
+ image_prompt_controlnet_models: typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+ ] = OMIT,
+ image_prompt_strength: typing.Optional[float] = OMIT,
+ image_prompt_scale: typing.Optional[float] = OMIT,
+ image_prompt_pos_x: typing.Optional[float] = OMIT,
+ image_prompt_pos_y: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
+ selected_controlnet_model: typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+ ] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ scheduler: typing.Optional[Scheduler] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ qr_code_data : typing.Optional[str]
+
+ qr_code_input_image : typing.Optional[str]
+
+ qr_code_vcard : typing.Optional[Vcard]
+
+ qr_code_file : typing.Optional[str]
+
+ use_url_shortener : typing.Optional[bool]
+
+ negative_prompt : typing.Optional[str]
+
+ image_prompt : typing.Optional[str]
+
+ image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]
+
+ image_prompt_strength : typing.Optional[float]
+
+ image_prompt_scale : typing.Optional[float]
+
+ image_prompt_pos_x : typing.Optional[float]
+
+ image_prompt_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]
+
+ selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ scheduler : typing.Optional[Scheduler]
+
+ seed : typing.Optional[int]
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.ai_art_qr_code.async_art_qr_code(
+ text_prompt="text_prompt",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/art-qr-code/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "qr_code_data": qr_code_data,
+ "qr_code_input_image": qr_code_input_image,
+ "qr_code_vcard": qr_code_vcard,
+ "qr_code_file": qr_code_file,
+ "use_url_shortener": use_url_shortener,
+ "text_prompt": text_prompt,
+ "negative_prompt": negative_prompt,
+ "image_prompt": image_prompt,
+ "image_prompt_controlnet_models": image_prompt_controlnet_models,
+ "image_prompt_strength": image_prompt_strength,
+ "image_prompt_scale": image_prompt_scale,
+ "image_prompt_pos_x": image_prompt_pos_x,
+ "image_prompt_pos_y": image_prompt_pos_y,
+ "selected_model": selected_model,
+ "selected_controlnet_model": selected_controlnet_model,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "controlnet_conditioning_scale": controlnet_conditioning_scale,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "scheduler": scheduler,
+ "seed": seed,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_art_qr_code(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> QrCodeGeneratorPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ QrCodeGeneratorPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.ai_art_qr_code.status_art_qr_code(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/art-qr-code/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(QrCodeGeneratorPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/ai_background_changer/__init__.py b/src/gooey/ai_background_changer/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/ai_background_changer/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/ai_background_changer/client.py b/src/gooey/ai_background_changer/client.py
new file mode 100644
index 0000000..96db0e3
--- /dev/null
+++ b/src/gooey/ai_background_changer/client.py
@@ -0,0 +1,555 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
+from ..types.image_segmentation_page_response import ImageSegmentationPageResponse
+from ..types.image_segmentation_page_status_response import ImageSegmentationPageStatusResponse
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AiBackgroundChangerClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def image_segmentation(
+ self,
+ *,
+ input_image: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ rect_persepective_transform: typing.Optional[bool] = OMIT,
+ reflection_opacity: typing.Optional[float] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> ImageSegmentationPageResponse:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel]
+
+ mask_threshold : typing.Optional[float]
+
+ rect_persepective_transform : typing.Optional[bool]
+
+ reflection_opacity : typing.Optional[float]
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ImageSegmentationPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.ai_background_changer.image_segmentation(
+ input_image="input_image",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v2/ImageSegmentation/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "selected_model": selected_model,
+ "mask_threshold": mask_threshold,
+ "rect_persepective_transform": rect_persepective_transform,
+ "reflection_opacity": reflection_opacity,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(ImageSegmentationPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def async_image_segmentation(
+ self,
+ *,
+ input_image: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ rect_persepective_transform: typing.Optional[bool] = OMIT,
+ reflection_opacity: typing.Optional[float] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel]
+
+ mask_threshold : typing.Optional[float]
+
+ rect_persepective_transform : typing.Optional[bool]
+
+ reflection_opacity : typing.Optional[float]
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.ai_background_changer.async_image_segmentation(
+ input_image="input_image",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/ImageSegmentation/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "selected_model": selected_model,
+ "mask_threshold": mask_threshold,
+ "rect_persepective_transform": rect_persepective_transform,
+ "reflection_opacity": reflection_opacity,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def status_image_segmentation(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ImageSegmentationPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ImageSegmentationPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.ai_background_changer.status_image_segmentation(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/ImageSegmentation/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(ImageSegmentationPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAiBackgroundChangerClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def image_segmentation(
+ self,
+ *,
+ input_image: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ rect_persepective_transform: typing.Optional[bool] = OMIT,
+ reflection_opacity: typing.Optional[float] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> ImageSegmentationPageResponse:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel]
+
+ mask_threshold : typing.Optional[float]
+
+ rect_persepective_transform : typing.Optional[bool]
+
+ reflection_opacity : typing.Optional[float]
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ImageSegmentationPageResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.ai_background_changer.image_segmentation(
+ input_image="input_image",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v2/ImageSegmentation/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "selected_model": selected_model,
+ "mask_threshold": mask_threshold,
+ "rect_persepective_transform": rect_persepective_transform,
+ "reflection_opacity": reflection_opacity,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(ImageSegmentationPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def async_image_segmentation(
+ self,
+ *,
+ input_image: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ rect_persepective_transform: typing.Optional[bool] = OMIT,
+ reflection_opacity: typing.Optional[float] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel]
+
+ mask_threshold : typing.Optional[float]
+
+ rect_persepective_transform : typing.Optional[bool]
+
+ reflection_opacity : typing.Optional[float]
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.ai_background_changer.async_image_segmentation(
+ input_image="input_image",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/ImageSegmentation/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "selected_model": selected_model,
+ "mask_threshold": mask_threshold,
+ "rect_persepective_transform": rect_persepective_transform,
+ "reflection_opacity": reflection_opacity,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_image_segmentation(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ImageSegmentationPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ImageSegmentationPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.ai_background_changer.status_image_segmentation(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/ImageSegmentation/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(ImageSegmentationPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py b/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py b/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py
new file mode 100644
index 0000000..dd8bd28
--- /dev/null
+++ b/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py
@@ -0,0 +1,799 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
+from ..types.email_face_inpainting_page_response import EmailFaceInpaintingPageResponse
+from ..types.email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AiGeneratedPhotoFromEmailProfileLookupClient:  # blocking (sync) client for the EmailFaceInpainting recipe endpoints
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def email_face_inpainting(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        email_address: typing.Optional[str] = OMIT,
+        twitter_handle: typing.Optional[str] = OMIT,
+        face_scale: typing.Optional[float] = OMIT,
+        face_pos_x: typing.Optional[float] = OMIT,
+        face_pos_y: typing.Optional[float] = OMIT,
+        selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        upscale_factor: typing.Optional[float] = OMIT,
+        output_width: typing.Optional[int] = OMIT,
+        output_height: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        should_send_email: typing.Optional[bool] = OMIT,
+        email_from: typing.Optional[str] = OMIT,
+        email_cc: typing.Optional[str] = OMIT,
+        email_bcc: typing.Optional[str] = OMIT,
+        email_subject: typing.Optional[str] = OMIT,
+        email_body: typing.Optional[str] = OMIT,
+        email_body_enable_html: typing.Optional[bool] = OMIT,
+        fallback_email_body: typing.Optional[str] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> EmailFaceInpaintingPageResponse:
+        """Run the EmailFaceInpainting recipe synchronously (POST v2/EmailFaceInpainting/) and return its result.
+        Parameters
+        ----------
+        text_prompt : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        email_address : typing.Optional[str]
+
+        twitter_handle : typing.Optional[str]
+
+        face_scale : typing.Optional[float]
+
+        face_pos_x : typing.Optional[float]
+
+        face_pos_y : typing.Optional[float]
+
+        selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]
+
+        negative_prompt : typing.Optional[str]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        upscale_factor : typing.Optional[float]
+
+        output_width : typing.Optional[int]
+
+        output_height : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        should_send_email : typing.Optional[bool]
+
+        email_from : typing.Optional[str]
+
+        email_cc : typing.Optional[str]
+
+        email_bcc : typing.Optional[str]
+
+        email_subject : typing.Optional[str]
+
+        email_body : typing.Optional[str]
+
+        email_body_enable_html : typing.Optional[bool]
+
+        fallback_email_body : typing.Optional[str]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EmailFaceInpaintingPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.ai_generated_photo_from_email_profile_lookup.email_face_inpainting(
+            email_address="sean@dara.network",
+            text_prompt="winter's day in paris",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/EmailFaceInpainting/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "email_address": email_address,
+                "twitter_handle": twitter_handle,
+                "text_prompt": text_prompt,
+                "face_scale": face_scale,
+                "face_pos_x": face_pos_x,
+                "face_pos_y": face_pos_y,
+                "selected_model": selected_model,
+                "negative_prompt": negative_prompt,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "upscale_factor": upscale_factor,
+                "output_width": output_width,
+                "output_height": output_height,
+                "guidance_scale": guidance_scale,
+                "should_send_email": should_send_email,
+                "email_from": email_from,
+                "email_cc": email_cc,
+                "email_bcc": email_bcc,
+                "email_subject": email_subject,
+                "email_body": email_body,
+                "email_body_enable_html": email_body_enable_html,
+                "fallback_email_body": fallback_email_body,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,  # keys still set to the OMIT sentinel are dropped from the JSON payload
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(EmailFaceInpaintingPageResponse, _response.json()) # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+                )
+            _response_json = _response.json()  # unhandled status code: fall through to the generic ApiError below
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)  # body was not valid JSON
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_email_face_inpainting(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        email_address: typing.Optional[str] = OMIT,
+        twitter_handle: typing.Optional[str] = OMIT,
+        face_scale: typing.Optional[float] = OMIT,
+        face_pos_x: typing.Optional[float] = OMIT,
+        face_pos_y: typing.Optional[float] = OMIT,
+        selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        upscale_factor: typing.Optional[float] = OMIT,
+        output_width: typing.Optional[int] = OMIT,
+        output_height: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        should_send_email: typing.Optional[bool] = OMIT,
+        email_from: typing.Optional[str] = OMIT,
+        email_cc: typing.Optional[str] = OMIT,
+        email_bcc: typing.Optional[str] = OMIT,
+        email_subject: typing.Optional[str] = OMIT,
+        email_body: typing.Optional[str] = OMIT,
+        email_body_enable_html: typing.Optional[bool] = OMIT,
+        fallback_email_body: typing.Optional[str] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """Start an EmailFaceInpainting run via the async API (POST v3/EmailFaceInpainting/async/).
+        Parameters
+        ----------
+        text_prompt : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        email_address : typing.Optional[str]
+
+        twitter_handle : typing.Optional[str]
+
+        face_scale : typing.Optional[float]
+
+        face_pos_x : typing.Optional[float]
+
+        face_pos_y : typing.Optional[float]
+
+        selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]
+
+        negative_prompt : typing.Optional[str]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        upscale_factor : typing.Optional[float]
+
+        output_width : typing.Optional[int]
+
+        output_height : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        should_send_email : typing.Optional[bool]
+
+        email_from : typing.Optional[str]
+
+        email_cc : typing.Optional[str]
+
+        email_bcc : typing.Optional[str]
+
+        email_subject : typing.Optional[str]
+
+        email_body : typing.Optional[str]
+
+        email_body_enable_html : typing.Optional[bool]
+
+        fallback_email_body : typing.Optional[str]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.ai_generated_photo_from_email_profile_lookup.async_email_face_inpainting(
+            email_address="sean@dara.network",
+            text_prompt="winter's day in paris",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/EmailFaceInpainting/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "email_address": email_address,
+                "twitter_handle": twitter_handle,
+                "text_prompt": text_prompt,
+                "face_scale": face_scale,
+                "face_pos_x": face_pos_x,
+                "face_pos_y": face_pos_y,
+                "selected_model": selected_model,
+                "negative_prompt": negative_prompt,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "upscale_factor": upscale_factor,
+                "output_width": output_width,
+                "output_height": output_height,
+                "guidance_scale": guidance_scale,
+                "should_send_email": should_send_email,
+                "email_from": email_from,
+                "email_cc": email_cc,
+                "email_bcc": email_bcc,
+                "email_subject": email_subject,
+                "email_body": email_body,
+                "email_body_enable_html": email_body_enable_html,
+                "fallback_email_body": fallback_email_body,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,  # keys still set to the OMIT sentinel are dropped from the JSON payload
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+                )
+            _response_json = _response.json()  # unhandled status code: fall through to the generic ApiError below
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)  # body was not valid JSON
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def status_email_face_inpainting(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> EmailFaceInpaintingPageStatusResponse:
+        """Fetch the current status of an EmailFaceInpainting run (GET v3/EmailFaceInpainting/status/).
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EmailFaceInpaintingPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting(
+            run_id="run_id",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/EmailFaceInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )  # run_id travels as a query parameter, not a JSON body
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(EmailFaceInpaintingPageStatusResponse, _response.json()) # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+                )
+            _response_json = _response.json()  # unhandled status code: fall through to the generic ApiError below
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)  # body was not valid JSON
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAiGeneratedPhotoFromEmailProfileLookupClient:  # awaitable (async) variant of AiGeneratedPhotoFromEmailProfileLookupClient
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def email_face_inpainting(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        email_address: typing.Optional[str] = OMIT,
+        twitter_handle: typing.Optional[str] = OMIT,
+        face_scale: typing.Optional[float] = OMIT,
+        face_pos_x: typing.Optional[float] = OMIT,
+        face_pos_y: typing.Optional[float] = OMIT,
+        selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        upscale_factor: typing.Optional[float] = OMIT,
+        output_width: typing.Optional[int] = OMIT,
+        output_height: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        should_send_email: typing.Optional[bool] = OMIT,
+        email_from: typing.Optional[str] = OMIT,
+        email_cc: typing.Optional[str] = OMIT,
+        email_bcc: typing.Optional[str] = OMIT,
+        email_subject: typing.Optional[str] = OMIT,
+        email_body: typing.Optional[str] = OMIT,
+        email_body_enable_html: typing.Optional[bool] = OMIT,
+        fallback_email_body: typing.Optional[str] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> EmailFaceInpaintingPageResponse:
+        """Run the EmailFaceInpainting recipe synchronously (POST v2/EmailFaceInpainting/) and await its result.
+        Parameters
+        ----------
+        text_prompt : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        email_address : typing.Optional[str]
+
+        twitter_handle : typing.Optional[str]
+
+        face_scale : typing.Optional[float]
+
+        face_pos_x : typing.Optional[float]
+
+        face_pos_y : typing.Optional[float]
+
+        selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]
+
+        negative_prompt : typing.Optional[str]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        upscale_factor : typing.Optional[float]
+
+        output_width : typing.Optional[int]
+
+        output_height : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        should_send_email : typing.Optional[bool]
+
+        email_from : typing.Optional[str]
+
+        email_cc : typing.Optional[str]
+
+        email_bcc : typing.Optional[str]
+
+        email_subject : typing.Optional[str]
+
+        email_body : typing.Optional[str]
+
+        email_body_enable_html : typing.Optional[bool]
+
+        fallback_email_body : typing.Optional[str]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EmailFaceInpaintingPageResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.ai_generated_photo_from_email_profile_lookup.email_face_inpainting(
+                email_address="sean@dara.network",
+                text_prompt="winter's day in paris",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v2/EmailFaceInpainting/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "email_address": email_address,
+                "twitter_handle": twitter_handle,
+                "text_prompt": text_prompt,
+                "face_scale": face_scale,
+                "face_pos_x": face_pos_x,
+                "face_pos_y": face_pos_y,
+                "selected_model": selected_model,
+                "negative_prompt": negative_prompt,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "upscale_factor": upscale_factor,
+                "output_width": output_width,
+                "output_height": output_height,
+                "guidance_scale": guidance_scale,
+                "should_send_email": should_send_email,
+                "email_from": email_from,
+                "email_cc": email_cc,
+                "email_bcc": email_bcc,
+                "email_subject": email_subject,
+                "email_body": email_body,
+                "email_body_enable_html": email_body_enable_html,
+                "fallback_email_body": fallback_email_body,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,  # keys still set to the OMIT sentinel are dropped from the JSON payload
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(EmailFaceInpaintingPageResponse, _response.json()) # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+                )
+            _response_json = _response.json()  # unhandled status code: fall through to the generic ApiError below
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)  # body was not valid JSON
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_email_face_inpainting(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        email_address: typing.Optional[str] = OMIT,
+        twitter_handle: typing.Optional[str] = OMIT,
+        face_scale: typing.Optional[float] = OMIT,
+        face_pos_x: typing.Optional[float] = OMIT,
+        face_pos_y: typing.Optional[float] = OMIT,
+        selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        upscale_factor: typing.Optional[float] = OMIT,
+        output_width: typing.Optional[int] = OMIT,
+        output_height: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        should_send_email: typing.Optional[bool] = OMIT,
+        email_from: typing.Optional[str] = OMIT,
+        email_cc: typing.Optional[str] = OMIT,
+        email_bcc: typing.Optional[str] = OMIT,
+        email_subject: typing.Optional[str] = OMIT,
+        email_body: typing.Optional[str] = OMIT,
+        email_body_enable_html: typing.Optional[bool] = OMIT,
+        fallback_email_body: typing.Optional[str] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """Start an EmailFaceInpainting run via the async API (POST v3/EmailFaceInpainting/async/).
+        Parameters
+        ----------
+        text_prompt : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        email_address : typing.Optional[str]
+
+        twitter_handle : typing.Optional[str]
+
+        face_scale : typing.Optional[float]
+
+        face_pos_x : typing.Optional[float]
+
+        face_pos_y : typing.Optional[float]
+
+        selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]
+
+        negative_prompt : typing.Optional[str]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        upscale_factor : typing.Optional[float]
+
+        output_width : typing.Optional[int]
+
+        output_height : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        should_send_email : typing.Optional[bool]
+
+        email_from : typing.Optional[str]
+
+        email_cc : typing.Optional[str]
+
+        email_bcc : typing.Optional[str]
+
+        email_subject : typing.Optional[str]
+
+        email_body : typing.Optional[str]
+
+        email_body_enable_html : typing.Optional[bool]
+
+        fallback_email_body : typing.Optional[str]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.ai_generated_photo_from_email_profile_lookup.async_email_face_inpainting(
+                email_address="sean@dara.network",
+                text_prompt="winter's day in paris",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/EmailFaceInpainting/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "email_address": email_address,
+                "twitter_handle": twitter_handle,
+                "text_prompt": text_prompt,
+                "face_scale": face_scale,
+                "face_pos_x": face_pos_x,
+                "face_pos_y": face_pos_y,
+                "selected_model": selected_model,
+                "negative_prompt": negative_prompt,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "upscale_factor": upscale_factor,
+                "output_width": output_width,
+                "output_height": output_height,
+                "guidance_scale": guidance_scale,
+                "should_send_email": should_send_email,
+                "email_from": email_from,
+                "email_cc": email_cc,
+                "email_bcc": email_bcc,
+                "email_subject": email_subject,
+                "email_body": email_body,
+                "email_body_enable_html": email_body_enable_html,
+                "fallback_email_body": fallback_email_body,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,  # keys still set to the OMIT sentinel are dropped from the JSON payload
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+                )
+            _response_json = _response.json()  # unhandled status code: fall through to the generic ApiError below
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)  # body was not valid JSON
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_email_face_inpainting(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> EmailFaceInpaintingPageStatusResponse:
+        """Fetch the current status of an EmailFaceInpainting run (GET v3/EmailFaceInpainting/status/).
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EmailFaceInpaintingPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/EmailFaceInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )  # run_id travels as a query parameter, not a JSON body
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(EmailFaceInpaintingPageStatusResponse, _response.json()) # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+                )
+            _response_json = _response.json()  # unhandled status code: fall through to the generic ApiError below
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)  # body was not valid JSON
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/ai_image_with_a_face/__init__.py b/src/gooey/ai_image_with_a_face/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/ai_image_with_a_face/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/ai_image_with_a_face/client.py b/src/gooey/ai_image_with_a_face/client.py
new file mode 100644
index 0000000..ba00a32
--- /dev/null
+++ b/src/gooey/ai_image_with_a_face/client.py
@@ -0,0 +1,655 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
+from ..types.face_inpainting_page_response import FaceInpaintingPageResponse
+from ..types.face_inpainting_page_status_response import FaceInpaintingPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AiImageWithAFaceClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def face_inpainting(
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ face_scale: typing.Optional[float] = OMIT,
+ face_pos_x: typing.Optional[float] = OMIT,
+ face_pos_y: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ upscale_factor: typing.Optional[float] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> FaceInpaintingPageResponse:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ face_scale : typing.Optional[float]
+
+ face_pos_x : typing.Optional[float]
+
+ face_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ upscale_factor : typing.Optional[float]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FaceInpaintingPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.ai_image_with_a_face.face_inpainting(
+ input_image="input_image",
+ text_prompt="tony stark from the iron man",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v2/FaceInpainting/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "face_scale": face_scale,
+ "face_pos_x": face_pos_x,
+ "face_pos_y": face_pos_y,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "upscale_factor": upscale_factor,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "seed": seed,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(FaceInpaintingPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def async_face_inpainting(
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ face_scale: typing.Optional[float] = OMIT,
+ face_pos_x: typing.Optional[float] = OMIT,
+ face_pos_y: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ upscale_factor: typing.Optional[float] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ face_scale : typing.Optional[float]
+
+ face_pos_x : typing.Optional[float]
+
+ face_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ upscale_factor : typing.Optional[float]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.ai_image_with_a_face.async_face_inpainting(
+ input_image="input_image",
+ text_prompt="tony stark from the iron man",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/FaceInpainting/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "face_scale": face_scale,
+ "face_pos_x": face_pos_x,
+ "face_pos_y": face_pos_y,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "upscale_factor": upscale_factor,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "seed": seed,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def status_face_inpainting(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> FaceInpaintingPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FaceInpaintingPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.ai_image_with_a_face.status_face_inpainting(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/FaceInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(FaceInpaintingPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAiImageWithAFaceClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def face_inpainting(
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ face_scale: typing.Optional[float] = OMIT,
+ face_pos_x: typing.Optional[float] = OMIT,
+ face_pos_y: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ upscale_factor: typing.Optional[float] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> FaceInpaintingPageResponse:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ face_scale : typing.Optional[float]
+
+ face_pos_x : typing.Optional[float]
+
+ face_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ upscale_factor : typing.Optional[float]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FaceInpaintingPageResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.ai_image_with_a_face.face_inpainting(
+ input_image="input_image",
+ text_prompt="tony stark from the iron man",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v2/FaceInpainting/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "face_scale": face_scale,
+ "face_pos_x": face_pos_x,
+ "face_pos_y": face_pos_y,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "upscale_factor": upscale_factor,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "seed": seed,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(FaceInpaintingPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def async_face_inpainting(
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ face_scale: typing.Optional[float] = OMIT,
+ face_pos_x: typing.Optional[float] = OMIT,
+ face_pos_y: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ upscale_factor: typing.Optional[float] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ face_scale : typing.Optional[float]
+
+ face_pos_x : typing.Optional[float]
+
+ face_pos_y : typing.Optional[float]
+
+ selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ upscale_factor : typing.Optional[float]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.ai_image_with_a_face.async_face_inpainting(
+ input_image="input_image",
+ text_prompt="tony stark from the iron man",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/FaceInpainting/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "face_scale": face_scale,
+ "face_pos_x": face_pos_x,
+ "face_pos_y": face_pos_y,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "upscale_factor": upscale_factor,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "seed": seed,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_face_inpainting(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> FaceInpaintingPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FaceInpaintingPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.ai_image_with_a_face.status_face_inpainting(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/FaceInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(FaceInpaintingPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/bulk_runner/__init__.py b/src/gooey/bulk_runner/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/bulk_runner/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/bulk_runner/client.py b/src/gooey/bulk_runner/client.py
new file mode 100644
index 0000000..ef0070c
--- /dev/null
+++ b/src/gooey/bulk_runner/client.py
@@ -0,0 +1,550 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.bulk_runner_page_response import BulkRunnerPageResponse
+from ..types.bulk_runner_page_status_response import BulkRunnerPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class BulkRunnerClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def post(
+ self,
+ *,
+ documents: typing.Sequence[str],
+ run_urls: typing.Sequence[str],
+ input_columns: typing.Dict[str, str],
+ output_columns: typing.Dict[str, str],
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ eval_urls: typing.Optional[typing.Sequence[str]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkRunnerPageResponse:
+ """
+ Parameters
+ ----------
+ documents : typing.Sequence[str]
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, these would be sample questions; for Art QR Code, they would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+ run_urls : typing.Sequence[str]
+ Provide one or more Gooey.AI workflow runs.
+ You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+
+ input_columns : typing.Dict[str, str]
+ For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+
+ output_columns : typing.Dict[str, str]
+ For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ eval_urls : typing.Optional[typing.Sequence[str]]
+ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkRunnerPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.bulk_runner.post(
+ documents=["documents"],
+ run_urls=["run_urls"],
+ input_columns={"input_columns": "input_columns"},
+ output_columns={"output_columns": "output_columns"},
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v2/bulk-runner/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "run_urls": run_urls,
+ "input_columns": input_columns,
+ "output_columns": output_columns,
+ "eval_urls": eval_urls,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(BulkRunnerPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def async_bulk_runner(
+ self,
+ *,
+ documents: typing.Sequence[str],
+ run_urls: typing.Sequence[str],
+ input_columns: typing.Dict[str, str],
+ output_columns: typing.Dict[str, str],
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ eval_urls: typing.Optional[typing.Sequence[str]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ documents : typing.Sequence[str]
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, these would be sample questions; for Art QR Code, they would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+ run_urls : typing.Sequence[str]
+ Provide one or more Gooey.AI workflow runs.
+ You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+
+ input_columns : typing.Dict[str, str]
+ For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+
+ output_columns : typing.Dict[str, str]
+ For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ eval_urls : typing.Optional[typing.Sequence[str]]
+ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.bulk_runner.async_bulk_runner(
+ documents=["documents"],
+ run_urls=["run_urls"],
+ input_columns={"input_columns": "input_columns"},
+ output_columns={"output_columns": "output_columns"},
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/bulk-runner/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "run_urls": run_urls,
+ "input_columns": input_columns,
+ "output_columns": output_columns,
+ "eval_urls": eval_urls,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def status_bulk_runner(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkRunnerPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkRunnerPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.bulk_runner.status_bulk_runner(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/bulk-runner/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(BulkRunnerPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncBulkRunnerClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def post(
+ self,
+ *,
+ documents: typing.Sequence[str],
+ run_urls: typing.Sequence[str],
+ input_columns: typing.Dict[str, str],
+ output_columns: typing.Dict[str, str],
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ eval_urls: typing.Optional[typing.Sequence[str]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkRunnerPageResponse:
+ """
+ Parameters
+ ----------
+ documents : typing.Sequence[str]
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, these would be sample questions; for Art QR Code, they would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+ run_urls : typing.Sequence[str]
+ Provide one or more Gooey.AI workflow runs.
+ You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+
+ input_columns : typing.Dict[str, str]
+ For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+
+ output_columns : typing.Dict[str, str]
+ For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ eval_urls : typing.Optional[typing.Sequence[str]]
+ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkRunnerPageResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.bulk_runner.post(
+ documents=["documents"],
+ run_urls=["run_urls"],
+ input_columns={"input_columns": "input_columns"},
+ output_columns={"output_columns": "output_columns"},
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v2/bulk-runner/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "run_urls": run_urls,
+ "input_columns": input_columns,
+ "output_columns": output_columns,
+ "eval_urls": eval_urls,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(BulkRunnerPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def async_bulk_runner(
+ self,
+ *,
+ documents: typing.Sequence[str],
+ run_urls: typing.Sequence[str],
+ input_columns: typing.Dict[str, str],
+ output_columns: typing.Dict[str, str],
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ eval_urls: typing.Optional[typing.Sequence[str]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ documents : typing.Sequence[str]
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
+ Remember to includes header names in your CSV too.
+
+ run_urls : typing.Sequence[str]
+ Provide one or more Gooey.AI workflow runs.
+ You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+
+ input_columns : typing.Dict[str, str]
+ For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+
+ output_columns : typing.Dict[str, str]
+ For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ eval_urls : typing.Optional[typing.Sequence[str]]
+ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.bulk_runner.async_bulk_runner(
+ documents=["documents"],
+ run_urls=["run_urls"],
+ input_columns={"input_columns": "input_columns"},
+ output_columns={"output_columns": "output_columns"},
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/bulk-runner/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "run_urls": run_urls,
+ "input_columns": input_columns,
+ "output_columns": output_columns,
+ "eval_urls": eval_urls,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_bulk_runner(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkRunnerPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkRunnerPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.bulk_runner.status_bulk_runner(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/bulk-runner/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(BulkRunnerPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/chyron_plant_bot/__init__.py b/src/gooey/chyron_plant_bot/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/chyron_plant_bot/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/chyron_plant_bot/client.py b/src/gooey/chyron_plant_bot/client.py
new file mode 100644
index 0000000..1b50bb3
--- /dev/null
+++ b/src/gooey/chyron_plant_bot/client.py
@@ -0,0 +1,474 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.chyron_plant_page_response import ChyronPlantPageResponse
+from ..types.chyron_plant_page_status_response import ChyronPlantPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
# Sentinel used as the default value for optional parameters. Each request
# passes ``omit=OMIT`` to the client wrapper, which presumably drops fields
# still equal to this sentinel from the serialized body (distinguishing
# "not provided" from an explicit None) — behavior lives in core.client_wrapper.
OMIT = typing.cast(typing.Any, ...)
+
+
class ChyronPlantBotClient:
    """Synchronous client for the Gooey.AI ChyronPlant endpoints.

    ``chyron_plant`` runs the recipe synchronously (v2), ``async_chyron_plant``
    submits a run for background execution (v3), and ``status_chyron_plant``
    polls a submitted run.
    """

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # The wrapper owns the configured httpx client, base URL and auth headers.
        self._client_wrapper = client_wrapper

    def chyron_plant(
        self,
        *,
        midi_notes: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        midi_notes_prompt: typing.Optional[str] = OMIT,
        chyron_prompt: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> ChyronPlantPageResponse:
        """
        Parameters
        ----------
        midi_notes : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        midi_notes_prompt : typing.Optional[str]

        chyron_prompt : typing.Optional[str]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        ChyronPlantPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.chyron_plant_bot.chyron_plant(
            midi_notes="C#1 B6 A2 A1 A3 A2",
        )
        """
        # omit=OMIT lets the client wrapper distinguish "not provided" from
        # an explicit None when serializing the JSON body.
        _response = self._client_wrapper.httpx_client.request(
            "v2/ChyronPlant/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "midi_notes": midi_notes,
                "midi_notes_prompt": midi_notes_prompt,
                "chyron_prompt": chyron_prompt,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # Map documented error status codes onto typed exceptions; any other
        # non-2xx (or a non-JSON body) falls through to a generic ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(ChyronPlantPageResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not JSON: surface the raw text instead of a parse failure.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def async_chyron_plant(
        self,
        *,
        midi_notes: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        midi_notes_prompt: typing.Optional[str] = OMIT,
        chyron_prompt: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Submit a ChyronPlant run for background execution; poll with
        ``status_chyron_plant``.

        Parameters
        ----------
        midi_notes : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        midi_notes_prompt : typing.Optional[str]

        chyron_prompt : typing.Optional[str]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.chyron_plant_bot.async_chyron_plant(
            midi_notes="C#1 B6 A2 A1 A3 A2",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v3/ChyronPlant/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "midi_notes": midi_notes,
                "midi_notes_prompt": midi_notes_prompt,
                "chyron_prompt": chyron_prompt,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # NOTE: unlike ``chyron_plant``, this endpoint does not map a 500
        # response to InternalServerError; a 500 falls through to ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def status_chyron_plant(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> ChyronPlantPageStatusResponse:
        """
        Poll the status of a run previously submitted via ``async_chyron_plant``.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        ChyronPlantPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.chyron_plant_bot.status_chyron_plant(
            run_id="run_id",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v3/ChyronPlant/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(ChyronPlantPageStatusResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
class AsyncChyronPlantBotClient:
    """Asynchronous counterpart of ``ChyronPlantBotClient``.

    Same three endpoints (sync run, background submit, status poll) issued
    through the async httpx client.
    """

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # The wrapper owns the configured httpx client, base URL and auth headers.
        self._client_wrapper = client_wrapper

    async def chyron_plant(
        self,
        *,
        midi_notes: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        midi_notes_prompt: typing.Optional[str] = OMIT,
        chyron_prompt: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> ChyronPlantPageResponse:
        """
        Parameters
        ----------
        midi_notes : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        midi_notes_prompt : typing.Optional[str]

        chyron_prompt : typing.Optional[str]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        ChyronPlantPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.chyron_plant_bot.chyron_plant(
                midi_notes="C#1 B6 A2 A1 A3 A2",
            )


        asyncio.run(main())
        """
        # omit=OMIT lets the client wrapper distinguish "not provided" from
        # an explicit None when serializing the JSON body.
        _response = await self._client_wrapper.httpx_client.request(
            "v2/ChyronPlant/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "midi_notes": midi_notes,
                "midi_notes_prompt": midi_notes_prompt,
                "chyron_prompt": chyron_prompt,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # Map documented error status codes onto typed exceptions; any other
        # non-2xx (or a non-JSON body) falls through to a generic ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(ChyronPlantPageResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not JSON: surface the raw text instead of a parse failure.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def async_chyron_plant(
        self,
        *,
        midi_notes: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        midi_notes_prompt: typing.Optional[str] = OMIT,
        chyron_prompt: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Submit a ChyronPlant run for background execution; poll with
        ``status_chyron_plant``.

        Parameters
        ----------
        midi_notes : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        midi_notes_prompt : typing.Optional[str]

        chyron_prompt : typing.Optional[str]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.chyron_plant_bot.async_chyron_plant(
                midi_notes="C#1 B6 A2 A1 A3 A2",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/ChyronPlant/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "midi_notes": midi_notes,
                "midi_notes_prompt": midi_notes_prompt,
                "chyron_prompt": chyron_prompt,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # NOTE: unlike ``chyron_plant``, this endpoint does not map a 500
        # response to InternalServerError; a 500 falls through to ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def status_chyron_plant(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> ChyronPlantPageStatusResponse:
        """
        Poll the status of a run previously submitted via ``async_chyron_plant``.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        ChyronPlantPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.chyron_plant_bot.status_chyron_plant(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/ChyronPlant/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(ChyronPlantPageStatusResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/client.py b/src/gooey/client.py
new file mode 100644
index 0000000..b5f4ad6
--- /dev/null
+++ b/src/gooey/client.py
@@ -0,0 +1,303 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import httpx
+
+from .ai_animation_generator.client import AiAnimationGeneratorClient, AsyncAiAnimationGeneratorClient
+from .ai_art_qr_code.client import AiArtQrCodeClient, AsyncAiArtQrCodeClient
+from .ai_background_changer.client import AiBackgroundChangerClient, AsyncAiBackgroundChangerClient
+from .ai_generated_photo_from_email_profile_lookup.client import (
+ AiGeneratedPhotoFromEmailProfileLookupClient,
+ AsyncAiGeneratedPhotoFromEmailProfileLookupClient,
+)
+from .ai_image_with_a_face.client import AiImageWithAFaceClient, AsyncAiImageWithAFaceClient
+from .bulk_runner.client import AsyncBulkRunnerClient, BulkRunnerClient
+from .chyron_plant_bot.client import AsyncChyronPlantBotClient, ChyronPlantBotClient
+from .compare_ai_image_generators.client import AsyncCompareAiImageGeneratorsClient, CompareAiImageGeneratorsClient
+from .compare_ai_image_upscalers.client import AsyncCompareAiImageUpscalersClient, CompareAiImageUpscalersClient
+from .compare_ai_translations.client import AsyncCompareAiTranslationsClient, CompareAiTranslationsClient
+from .compare_ai_voice_generators.client import AsyncCompareAiVoiceGeneratorsClient, CompareAiVoiceGeneratorsClient
+from .copilot_for_your_enterprise.client import AsyncCopilotForYourEnterpriseClient, CopilotForYourEnterpriseClient
+from .copilot_integrations.client import AsyncCopilotIntegrationsClient, CopilotIntegrationsClient
+from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from .create_a_perfect_seo_optimized_title_paragraph.client import (
+ AsyncCreateAPerfectSeoOptimizedTitleParagraphClient,
+ CreateAPerfectSeoOptimizedTitleParagraphClient,
+)
+from .edit_an_image_with_ai_prompt.client import AsyncEditAnImageWithAiPromptClient, EditAnImageWithAiPromptClient
+from .embeddings.client import AsyncEmbeddingsClient, EmbeddingsClient
+from .environment import GooeyEnvironment
+from .evaluator.client import AsyncEvaluatorClient, EvaluatorClient
+from .functions.client import AsyncFunctionsClient, FunctionsClient
+from .generate_people_also_ask_seo_content.client import (
+ AsyncGeneratePeopleAlsoAskSeoContentClient,
+ GeneratePeopleAlsoAskSeoContentClient,
+)
+from .generate_product_photo_backgrounds.client import (
+ AsyncGenerateProductPhotoBackgroundsClient,
+ GenerateProductPhotoBackgroundsClient,
+)
+from .large_language_models_gpt3.client import AsyncLargeLanguageModelsGpt3Client, LargeLanguageModelsGpt3Client
+from .letter_writer.client import AsyncLetterWriterClient, LetterWriterClient
+from .lip_syncing.client import AsyncLipSyncingClient, LipSyncingClient
+from .lipsync_video_with_any_text.client import AsyncLipsyncVideoWithAnyTextClient, LipsyncVideoWithAnyTextClient
+from .misc.client import AsyncMiscClient, MiscClient
+from .people_also_ask_answers_from_a_doc.client import (
+ AsyncPeopleAlsoAskAnswersFromADocClient,
+ PeopleAlsoAskAnswersFromADocClient,
+)
+from .profile_lookup_gpt3for_ai_personalized_emails.client import (
+ AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient,
+ ProfileLookupGpt3ForAiPersonalizedEmailsClient,
+)
+from .render_image_search_results_with_ai.client import (
+ AsyncRenderImageSearchResultsWithAiClient,
+ RenderImageSearchResultsWithAiClient,
+)
+from .search_your_docs_with_gpt.client import AsyncSearchYourDocsWithGptClient, SearchYourDocsWithGptClient
+from .smart_gpt.client import AsyncSmartGptClient, SmartGptClient
+from .speech_recognition_translation.client import (
+ AsyncSpeechRecognitionTranslationClient,
+ SpeechRecognitionTranslationClient,
+)
+from .summarize_your_docs_with_gpt.client import AsyncSummarizeYourDocsWithGptClient, SummarizeYourDocsWithGptClient
+from .synthetic_data_maker_for_videos_pd_fs.client import (
+ AsyncSyntheticDataMakerForVideosPdFsClient,
+ SyntheticDataMakerForVideosPdFsClient,
+)
+from .text_guided_audio_generator.client import AsyncTextGuidedAudioGeneratorClient, TextGuidedAudioGeneratorClient
+from .web_search_gpt3.client import AsyncWebSearchGpt3Client, WebSearchGpt3Client
+
+
class Gooey:
    """
    Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions.

    Parameters
    ----------
    base_url : typing.Optional[str]
        The base url to use for requests from the client.

    environment : GooeyEnvironment
        The environment to use for requests from the client.

        Defaults to GooeyEnvironment.DEFAULT

    authorization : typing.Optional[str]
    timeout : typing.Optional[float]
        The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced.

    follow_redirects : typing.Optional[bool]
        Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in.

    httpx_client : typing.Optional[httpx.Client]
        The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration.

    Examples
    --------
    from gooey.client import Gooey

    client = Gooey(
        authorization="YOUR_AUTHORIZATION",
    )
    """

    def __init__(
        self,
        *,
        base_url: typing.Optional[str] = None,
        environment: GooeyEnvironment = GooeyEnvironment.DEFAULT,
        authorization: typing.Optional[str] = None,
        timeout: typing.Optional[float] = None,
        follow_redirects: typing.Optional[bool] = True,
        httpx_client: typing.Optional[httpx.Client] = None
    ):
        # Explicit timeout wins; otherwise default to 60s only when we build the
        # httpx client ourselves (a user-supplied client keeps its own timeout).
        _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None
        # Prefer the caller's httpx client; otherwise construct one, passing
        # follow_redirects only when the caller gave a non-None value.
        self._client_wrapper = SyncClientWrapper(
            base_url=_get_base_url(base_url=base_url, environment=environment),
            authorization=authorization,
            httpx_client=httpx_client
            if httpx_client is not None
            else httpx.Client(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
            if follow_redirects is not None
            else httpx.Client(timeout=_defaulted_timeout),
            timeout=_defaulted_timeout,
        )
        # One sub-client per workflow/recipe group; all share the same wrapper
        # (and therefore the same base URL, auth and httpx client).
        self.copilot_integrations = CopilotIntegrationsClient(client_wrapper=self._client_wrapper)
        self.copilot_for_your_enterprise = CopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper)
        self.ai_animation_generator = AiAnimationGeneratorClient(client_wrapper=self._client_wrapper)
        self.ai_art_qr_code = AiArtQrCodeClient(client_wrapper=self._client_wrapper)
        self.generate_people_also_ask_seo_content = GeneratePeopleAlsoAskSeoContentClient(
            client_wrapper=self._client_wrapper
        )
        self.create_a_perfect_seo_optimized_title_paragraph = CreateAPerfectSeoOptimizedTitleParagraphClient(
            client_wrapper=self._client_wrapper
        )
        self.web_search_gpt3 = WebSearchGpt3Client(client_wrapper=self._client_wrapper)
        self.profile_lookup_gpt3for_ai_personalized_emails = ProfileLookupGpt3ForAiPersonalizedEmailsClient(
            client_wrapper=self._client_wrapper
        )
        self.bulk_runner = BulkRunnerClient(client_wrapper=self._client_wrapper)
        self.evaluator = EvaluatorClient(client_wrapper=self._client_wrapper)
        self.synthetic_data_maker_for_videos_pd_fs = SyntheticDataMakerForVideosPdFsClient(
            client_wrapper=self._client_wrapper
        )
        self.large_language_models_gpt3 = LargeLanguageModelsGpt3Client(client_wrapper=self._client_wrapper)
        self.search_your_docs_with_gpt = SearchYourDocsWithGptClient(client_wrapper=self._client_wrapper)
        self.smart_gpt = SmartGptClient(client_wrapper=self._client_wrapper)
        self.summarize_your_docs_with_gpt = SummarizeYourDocsWithGptClient(client_wrapper=self._client_wrapper)
        self.functions = FunctionsClient(client_wrapper=self._client_wrapper)
        self.lip_syncing = LipSyncingClient(client_wrapper=self._client_wrapper)
        self.lipsync_video_with_any_text = LipsyncVideoWithAnyTextClient(client_wrapper=self._client_wrapper)
        self.compare_ai_voice_generators = CompareAiVoiceGeneratorsClient(client_wrapper=self._client_wrapper)
        self.speech_recognition_translation = SpeechRecognitionTranslationClient(client_wrapper=self._client_wrapper)
        self.text_guided_audio_generator = TextGuidedAudioGeneratorClient(client_wrapper=self._client_wrapper)
        self.compare_ai_translations = CompareAiTranslationsClient(client_wrapper=self._client_wrapper)
        self.edit_an_image_with_ai_prompt = EditAnImageWithAiPromptClient(client_wrapper=self._client_wrapper)
        self.compare_ai_image_generators = CompareAiImageGeneratorsClient(client_wrapper=self._client_wrapper)
        self.generate_product_photo_backgrounds = GenerateProductPhotoBackgroundsClient(
            client_wrapper=self._client_wrapper
        )
        self.ai_image_with_a_face = AiImageWithAFaceClient(client_wrapper=self._client_wrapper)
        self.ai_generated_photo_from_email_profile_lookup = AiGeneratedPhotoFromEmailProfileLookupClient(
            client_wrapper=self._client_wrapper
        )
        self.render_image_search_results_with_ai = RenderImageSearchResultsWithAiClient(
            client_wrapper=self._client_wrapper
        )
        self.ai_background_changer = AiBackgroundChangerClient(client_wrapper=self._client_wrapper)
        self.compare_ai_image_upscalers = CompareAiImageUpscalersClient(client_wrapper=self._client_wrapper)
        self.chyron_plant_bot = ChyronPlantBotClient(client_wrapper=self._client_wrapper)
        self.letter_writer = LetterWriterClient(client_wrapper=self._client_wrapper)
        self.embeddings = EmbeddingsClient(client_wrapper=self._client_wrapper)
        self.people_also_ask_answers_from_a_doc = PeopleAlsoAskAnswersFromADocClient(
            client_wrapper=self._client_wrapper
        )
        self.misc = MiscClient(client_wrapper=self._client_wrapper)
+
+
class AsyncGooey:
    """
    Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions.

    Parameters
    ----------
    base_url : typing.Optional[str]
        The base url to use for requests from the client.

    environment : GooeyEnvironment
        The environment to use for requests from the client.

        Defaults to GooeyEnvironment.DEFAULT

    authorization : typing.Optional[str]
    timeout : typing.Optional[float]
        The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced.

    follow_redirects : typing.Optional[bool]
        Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in.

    httpx_client : typing.Optional[httpx.AsyncClient]
        The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration.

    Examples
    --------
    from gooey.client import AsyncGooey

    client = AsyncGooey(
        authorization="YOUR_AUTHORIZATION",
    )
    """

    def __init__(
        self,
        *,
        base_url: typing.Optional[str] = None,
        environment: GooeyEnvironment = GooeyEnvironment.DEFAULT,
        authorization: typing.Optional[str] = None,
        timeout: typing.Optional[float] = None,
        follow_redirects: typing.Optional[bool] = True,
        httpx_client: typing.Optional[httpx.AsyncClient] = None
    ):
        # Apply the 60s default only when we will construct the httpx client
        # ourselves; a caller-supplied client keeps its own timeout config.
        _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None
        self._client_wrapper = AsyncClientWrapper(
            base_url=_get_base_url(base_url=base_url, environment=environment),
            authorization=authorization,
            # Prefer the caller's client; otherwise build one, forwarding
            # follow_redirects only when it was explicitly provided (not None).
            httpx_client=httpx_client
            if httpx_client is not None
            else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
            if follow_redirects is not None
            else httpx.AsyncClient(timeout=_defaulted_timeout),
            timeout=_defaulted_timeout,
        )
        # One async sub-client per API surface, all sharing the same wrapper
        # (and therefore the same base URL, auth header and httpx client).
        self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper)
        self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper)
        self.ai_animation_generator = AsyncAiAnimationGeneratorClient(client_wrapper=self._client_wrapper)
        self.ai_art_qr_code = AsyncAiArtQrCodeClient(client_wrapper=self._client_wrapper)
        self.generate_people_also_ask_seo_content = AsyncGeneratePeopleAlsoAskSeoContentClient(
            client_wrapper=self._client_wrapper
        )
        self.create_a_perfect_seo_optimized_title_paragraph = AsyncCreateAPerfectSeoOptimizedTitleParagraphClient(
            client_wrapper=self._client_wrapper
        )
        self.web_search_gpt3 = AsyncWebSearchGpt3Client(client_wrapper=self._client_wrapper)
        self.profile_lookup_gpt3for_ai_personalized_emails = AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient(
            client_wrapper=self._client_wrapper
        )
        self.bulk_runner = AsyncBulkRunnerClient(client_wrapper=self._client_wrapper)
        self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper)
        self.synthetic_data_maker_for_videos_pd_fs = AsyncSyntheticDataMakerForVideosPdFsClient(
            client_wrapper=self._client_wrapper
        )
        self.large_language_models_gpt3 = AsyncLargeLanguageModelsGpt3Client(client_wrapper=self._client_wrapper)
        self.search_your_docs_with_gpt = AsyncSearchYourDocsWithGptClient(client_wrapper=self._client_wrapper)
        self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper)
        self.summarize_your_docs_with_gpt = AsyncSummarizeYourDocsWithGptClient(client_wrapper=self._client_wrapper)
        self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper)
        self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper)
        self.lipsync_video_with_any_text = AsyncLipsyncVideoWithAnyTextClient(client_wrapper=self._client_wrapper)
        self.compare_ai_voice_generators = AsyncCompareAiVoiceGeneratorsClient(client_wrapper=self._client_wrapper)
        self.speech_recognition_translation = AsyncSpeechRecognitionTranslationClient(
            client_wrapper=self._client_wrapper
        )
        self.text_guided_audio_generator = AsyncTextGuidedAudioGeneratorClient(client_wrapper=self._client_wrapper)
        self.compare_ai_translations = AsyncCompareAiTranslationsClient(client_wrapper=self._client_wrapper)
        self.edit_an_image_with_ai_prompt = AsyncEditAnImageWithAiPromptClient(client_wrapper=self._client_wrapper)
        self.compare_ai_image_generators = AsyncCompareAiImageGeneratorsClient(client_wrapper=self._client_wrapper)
        self.generate_product_photo_backgrounds = AsyncGenerateProductPhotoBackgroundsClient(
            client_wrapper=self._client_wrapper
        )
        self.ai_image_with_a_face = AsyncAiImageWithAFaceClient(client_wrapper=self._client_wrapper)
        self.ai_generated_photo_from_email_profile_lookup = AsyncAiGeneratedPhotoFromEmailProfileLookupClient(
            client_wrapper=self._client_wrapper
        )
        self.render_image_search_results_with_ai = AsyncRenderImageSearchResultsWithAiClient(
            client_wrapper=self._client_wrapper
        )
        self.ai_background_changer = AsyncAiBackgroundChangerClient(client_wrapper=self._client_wrapper)
        self.compare_ai_image_upscalers = AsyncCompareAiImageUpscalersClient(client_wrapper=self._client_wrapper)
        self.chyron_plant_bot = AsyncChyronPlantBotClient(client_wrapper=self._client_wrapper)
        self.letter_writer = AsyncLetterWriterClient(client_wrapper=self._client_wrapper)
        self.embeddings = AsyncEmbeddingsClient(client_wrapper=self._client_wrapper)
        self.people_also_ask_answers_from_a_doc = AsyncPeopleAlsoAskAnswersFromADocClient(
            client_wrapper=self._client_wrapper
        )
        self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper)
+
+
def _get_base_url(*, base_url: typing.Optional[str] = None, environment: GooeyEnvironment) -> str:
    """Resolve the base URL used by the client.

    An explicitly supplied ``base_url`` always wins; otherwise the URL comes
    from the environment's ``value``. Raises when neither is available.
    """
    if base_url is not None:
        return base_url
    if environment is not None:
        return environment.value
    raise Exception("Please pass in either base_url or environment to construct the client")
diff --git a/src/gooey/compare_ai_image_generators/__init__.py b/src/gooey/compare_ai_image_generators/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/compare_ai_image_generators/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/compare_ai_image_generators/client.py b/src/gooey/compare_ai_image_generators/client.py
new file mode 100644
index 0000000..7e9c418
--- /dev/null
+++ b/src/gooey/compare_ai_image_generators/client.py
@@ -0,0 +1,668 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
+from ..types.compare_text2img_page_response import CompareText2ImgPageResponse
+from ..types.compare_text2img_page_status_response import CompareText2ImgPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.scheduler import Scheduler
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
class CompareAiImageGeneratorsClient:
    """Synchronous client for the CompareText2Img recipe endpoints.

    Wraps three HTTP routes: a blocking run (``v2/CompareText2Img/``), an
    async-run submission (``v3/CompareText2Img/async/``) and a run-status
    poll (``v3/CompareText2Img/status/``). All requests go through the
    shared ``SyncClientWrapper`` provided at construction.
    """

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # Shared transport: carries base URL, auth header and the httpx client.
        self._client_wrapper = client_wrapper

    def compare_text2img(
        self,
        *,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        negative_prompt: typing.Optional[str] = OMIT,
        output_width: typing.Optional[int] = OMIT,
        output_height: typing.Optional[int] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[int] = OMIT,
        dall_e3quality: typing.Optional[str] = OMIT,
        dall_e3style: typing.Optional[str] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        seed: typing.Optional[int] = OMIT,
        sd2upscaling: typing.Optional[bool] = OMIT,
        selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT,
        scheduler: typing.Optional[Scheduler] = OMIT,
        edit_instruction: typing.Optional[str] = OMIT,
        image_guidance_scale: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> CompareText2ImgPageResponse:
        """
        Parameters
        ----------
        text_prompt : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        negative_prompt : typing.Optional[str]

        output_width : typing.Optional[int]

        output_height : typing.Optional[int]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[int]

        dall_e3quality : typing.Optional[str]

        dall_e3style : typing.Optional[str]

        guidance_scale : typing.Optional[float]

        seed : typing.Optional[int]

        sd2upscaling : typing.Optional[bool]

        selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]

        scheduler : typing.Optional[Scheduler]

        edit_instruction : typing.Optional[str]

        image_guidance_scale : typing.Optional[float]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CompareText2ImgPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.compare_ai_image_generators.compare_text2img(
            text_prompt="text_prompt",
        )
        """
        # NOTE: JSON keys are the wire names, which differ from the Python
        # parameter names for some fields (e.g. dall_e3quality ->
        # "dall_e_3_quality"). omit=OMIT lets the wrapper drop unset fields.
        _response = self._client_wrapper.httpx_client.request(
            "v2/CompareText2Img/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "text_prompt": text_prompt,
                "negative_prompt": negative_prompt,
                "output_width": output_width,
                "output_height": output_height,
                "num_outputs": num_outputs,
                "quality": quality,
                "dall_e_3_quality": dall_e3quality,
                "dall_e_3_style": dall_e3style,
                "guidance_scale": guidance_scale,
                "seed": seed,
                "sd_2_upscaling": sd2upscaling,
                "selected_models": selected_models,
                "scheduler": scheduler,
                "edit_instruction": edit_instruction,
                "image_guidance_scale": image_guidance_scale,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # Map known status codes to typed exceptions; any other non-2xx
        # response falls through to the generic ApiError at the bottom.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(CompareText2ImgPageResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not JSON (e.g. an HTML error page): surface the raw text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def async_compare_text2img(
        self,
        *,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        negative_prompt: typing.Optional[str] = OMIT,
        output_width: typing.Optional[int] = OMIT,
        output_height: typing.Optional[int] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[int] = OMIT,
        dall_e3quality: typing.Optional[str] = OMIT,
        dall_e3style: typing.Optional[str] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        seed: typing.Optional[int] = OMIT,
        sd2upscaling: typing.Optional[bool] = OMIT,
        selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT,
        scheduler: typing.Optional[Scheduler] = OMIT,
        edit_instruction: typing.Optional[str] = OMIT,
        image_guidance_scale: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Parameters
        ----------
        text_prompt : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        negative_prompt : typing.Optional[str]

        output_width : typing.Optional[int]

        output_height : typing.Optional[int]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[int]

        dall_e3quality : typing.Optional[str]

        dall_e3style : typing.Optional[str]

        guidance_scale : typing.Optional[float]

        seed : typing.Optional[int]

        sd2upscaling : typing.Optional[bool]

        selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]

        scheduler : typing.Optional[Scheduler]

        edit_instruction : typing.Optional[str]

        image_guidance_scale : typing.Optional[float]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.compare_ai_image_generators.async_compare_text2img(
            text_prompt="text_prompt",
        )
        """
        # Submits the run to the v3 async endpoint; the response carries a
        # run identifier rather than the finished result (poll with
        # status_compare_text2img).
        _response = self._client_wrapper.httpx_client.request(
            "v3/CompareText2Img/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "text_prompt": text_prompt,
                "negative_prompt": negative_prompt,
                "output_width": output_width,
                "output_height": output_height,
                "num_outputs": num_outputs,
                "quality": quality,
                "dall_e_3_quality": dall_e3quality,
                "dall_e_3_style": dall_e3style,
                "guidance_scale": guidance_scale,
                "seed": seed,
                "sd_2_upscaling": sd2upscaling,
                "selected_models": selected_models,
                "scheduler": scheduler,
                "edit_instruction": edit_instruction,
                "image_guidance_scale": image_guidance_scale,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def status_compare_text2img(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> CompareText2ImgPageStatusResponse:
        """
        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CompareText2ImgPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.compare_ai_image_generators.status_compare_text2img(
            run_id="run_id",
        )
        """
        # Polls the status of a run previously submitted via
        # async_compare_text2img, identified by run_id.
        _response = self._client_wrapper.httpx_client.request(
            "v3/CompareText2Img/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(CompareText2ImgPageStatusResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
class AsyncCompareAiImageGeneratorsClient:
    """Asynchronous twin of ``CompareAiImageGeneratorsClient``.

    Same three CompareText2Img routes (blocking run, async-run submission,
    status poll), but every method is a coroutine and requests go through
    the shared ``AsyncClientWrapper``.
    """

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # Shared transport: carries base URL, auth header and the httpx client.
        self._client_wrapper = client_wrapper

    async def compare_text2img(
        self,
        *,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        negative_prompt: typing.Optional[str] = OMIT,
        output_width: typing.Optional[int] = OMIT,
        output_height: typing.Optional[int] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[int] = OMIT,
        dall_e3quality: typing.Optional[str] = OMIT,
        dall_e3style: typing.Optional[str] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        seed: typing.Optional[int] = OMIT,
        sd2upscaling: typing.Optional[bool] = OMIT,
        selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT,
        scheduler: typing.Optional[Scheduler] = OMIT,
        edit_instruction: typing.Optional[str] = OMIT,
        image_guidance_scale: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> CompareText2ImgPageResponse:
        """
        Parameters
        ----------
        text_prompt : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        negative_prompt : typing.Optional[str]

        output_width : typing.Optional[int]

        output_height : typing.Optional[int]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[int]

        dall_e3quality : typing.Optional[str]

        dall_e3style : typing.Optional[str]

        guidance_scale : typing.Optional[float]

        seed : typing.Optional[int]

        sd2upscaling : typing.Optional[bool]

        selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]

        scheduler : typing.Optional[Scheduler]

        edit_instruction : typing.Optional[str]

        image_guidance_scale : typing.Optional[float]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CompareText2ImgPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.compare_ai_image_generators.compare_text2img(
                text_prompt="text_prompt",
            )


        asyncio.run(main())
        """
        # NOTE: JSON keys are the wire names, which differ from the Python
        # parameter names for some fields (e.g. dall_e3quality ->
        # "dall_e_3_quality"). omit=OMIT lets the wrapper drop unset fields.
        _response = await self._client_wrapper.httpx_client.request(
            "v2/CompareText2Img/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "text_prompt": text_prompt,
                "negative_prompt": negative_prompt,
                "output_width": output_width,
                "output_height": output_height,
                "num_outputs": num_outputs,
                "quality": quality,
                "dall_e_3_quality": dall_e3quality,
                "dall_e_3_style": dall_e3style,
                "guidance_scale": guidance_scale,
                "seed": seed,
                "sd_2_upscaling": sd2upscaling,
                "selected_models": selected_models,
                "scheduler": scheduler,
                "edit_instruction": edit_instruction,
                "image_guidance_scale": image_guidance_scale,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # Map known status codes to typed exceptions; any other non-2xx
        # response falls through to the generic ApiError at the bottom.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(CompareText2ImgPageResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not JSON (e.g. an HTML error page): surface the raw text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def async_compare_text2img(
        self,
        *,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        negative_prompt: typing.Optional[str] = OMIT,
        output_width: typing.Optional[int] = OMIT,
        output_height: typing.Optional[int] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[int] = OMIT,
        dall_e3quality: typing.Optional[str] = OMIT,
        dall_e3style: typing.Optional[str] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        seed: typing.Optional[int] = OMIT,
        sd2upscaling: typing.Optional[bool] = OMIT,
        selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT,
        scheduler: typing.Optional[Scheduler] = OMIT,
        edit_instruction: typing.Optional[str] = OMIT,
        image_guidance_scale: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Parameters
        ----------
        text_prompt : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        negative_prompt : typing.Optional[str]

        output_width : typing.Optional[int]

        output_height : typing.Optional[int]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[int]

        dall_e3quality : typing.Optional[str]

        dall_e3style : typing.Optional[str]

        guidance_scale : typing.Optional[float]

        seed : typing.Optional[int]

        sd2upscaling : typing.Optional[bool]

        selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]

        scheduler : typing.Optional[Scheduler]

        edit_instruction : typing.Optional[str]

        image_guidance_scale : typing.Optional[float]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.compare_ai_image_generators.async_compare_text2img(
                text_prompt="text_prompt",
            )


        asyncio.run(main())
        """
        # Submits the run to the v3 async endpoint; the response carries a
        # run identifier rather than the finished result (poll with
        # status_compare_text2img).
        _response = await self._client_wrapper.httpx_client.request(
            "v3/CompareText2Img/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "text_prompt": text_prompt,
                "negative_prompt": negative_prompt,
                "output_width": output_width,
                "output_height": output_height,
                "num_outputs": num_outputs,
                "quality": quality,
                "dall_e_3_quality": dall_e3quality,
                "dall_e_3_style": dall_e3style,
                "guidance_scale": guidance_scale,
                "seed": seed,
                "sd_2_upscaling": sd2upscaling,
                "selected_models": selected_models,
                "scheduler": scheduler,
                "edit_instruction": edit_instruction,
                "image_guidance_scale": image_guidance_scale,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def status_compare_text2img(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> CompareText2ImgPageStatusResponse:
        """
        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CompareText2ImgPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.compare_ai_image_generators.status_compare_text2img(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        # Polls the status of a run previously submitted via
        # async_compare_text2img, identified by run_id.
        _response = await self._client_wrapper.httpx_client.request(
            "v3/CompareText2Img/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(CompareText2ImgPageStatusResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/compare_ai_image_upscalers/__init__.py b/src/gooey/compare_ai_image_upscalers/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/compare_ai_image_upscalers/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/compare_ai_image_upscalers/client.py b/src/gooey/compare_ai_image_upscalers/client.py
new file mode 100644
index 0000000..5d6fda5
--- /dev/null
+++ b/src/gooey/compare_ai_image_upscalers/client.py
@@ -0,0 +1,519 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
+from ..types.compare_upscaler_page_response import CompareUpscalerPageResponse
+from ..types.compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class CompareAiImageUpscalersClient:
+    """Synchronous client for the "Compare AI Image Upscalers" recipe endpoints.
+
+    Exposes a blocking run (``v2/compare-ai-upscalers/``), an asynchronous job
+    submission (``v3/compare-ai-upscalers/async/``) and a status poll for a
+    submitted run (``v3/compare-ai-upscalers/status/``).
+    """
+
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def compare_ai_upscalers(
+        self,
+        *,
+        scale: int,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        input_image: typing.Optional[str] = OMIT,
+        input_video: typing.Optional[str] = OMIT,
+        selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT,
+        selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> CompareUpscalerPageResponse:
+        """
+        Parameters
+        ----------
+        scale : int
+            The final upsampling scale of the image
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        input_image : typing.Optional[str]
+            Input Image
+
+        input_video : typing.Optional[str]
+            Input Video
+
+        selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]
+
+        selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        CompareUpscalerPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.compare_ai_image_upscalers.compare_ai_upscalers(
+            scale=1,
+        )
+        """
+        # Parameters still equal to the OMIT sentinel are stripped from the JSON
+        # body by the client wrapper (via omit=OMIT).
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/compare-ai-upscalers/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "input_image": input_image,
+                "input_video": input_video,
+                "scale": scale,
+                "selected_models": selected_models,
+                "selected_bg_model": selected_bg_model,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses to typed exceptions; any other status,
+        # or a non-JSON error body, falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(CompareUpscalerPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_compare_ai_upscalers(
+        self,
+        *,
+        scale: int,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        input_image: typing.Optional[str] = OMIT,
+        input_video: typing.Optional[str] = OMIT,
+        selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT,
+        selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        scale : int
+            The final upsampling scale of the image
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        input_image : typing.Optional[str]
+            Input Image
+
+        input_video : typing.Optional[str]
+            Input Video
+
+        selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]
+
+        selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.compare_ai_image_upscalers.async_compare_ai_upscalers(
+            scale=1,
+        )
+        """
+        # Submits the run without waiting for the result; poll it later with
+        # status_compare_ai_upscalers(). OMIT-valued fields are stripped (omit=OMIT).
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/compare-ai-upscalers/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "input_image": input_image,
+                "input_video": input_video,
+                "scale": scale,
+                "selected_models": selected_models,
+                "selected_bg_model": selected_bg_model,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Same error dispatch as the blocking run, minus the 500 mapping (the
+        # async submission endpoint does not document FailedReponseModelV2).
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def status_compare_ai_upscalers(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> CompareUpscalerPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        CompareUpscalerPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.compare_ai_image_upscalers.status_compare_ai_upscalers(
+            run_id="run_id",
+        )
+        """
+        # Poll the status of a previously submitted run; run_id travels as a
+        # query parameter on a GET.
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/compare-ai-upscalers/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(CompareUpscalerPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncCompareAiImageUpscalersClient:
+    """Asyncio client for the "Compare AI Image Upscalers" recipe endpoints.
+
+    Mirrors CompareAiImageUpscalersClient method-for-method; the only difference
+    is that each HTTP call is awaited on the async client wrapper.
+    """
+
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def compare_ai_upscalers(
+        self,
+        *,
+        scale: int,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        input_image: typing.Optional[str] = OMIT,
+        input_video: typing.Optional[str] = OMIT,
+        selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT,
+        selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> CompareUpscalerPageResponse:
+        """
+        Parameters
+        ----------
+        scale : int
+            The final upsampling scale of the image
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        input_image : typing.Optional[str]
+            Input Image
+
+        input_video : typing.Optional[str]
+            Input Video
+
+        selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]
+
+        selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        CompareUpscalerPageResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.compare_ai_image_upscalers.compare_ai_upscalers(
+                scale=1,
+            )
+
+
+        asyncio.run(main())
+        """
+        # Parameters still equal to the OMIT sentinel are stripped from the JSON
+        # body by the client wrapper (via omit=OMIT).
+        _response = await self._client_wrapper.httpx_client.request(
+            "v2/compare-ai-upscalers/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "input_image": input_image,
+                "input_video": input_video,
+                "scale": scale,
+                "selected_models": selected_models,
+                "selected_bg_model": selected_bg_model,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses to typed exceptions; any other status,
+        # or a non-JSON error body, falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(CompareUpscalerPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_compare_ai_upscalers(
+        self,
+        *,
+        scale: int,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        input_image: typing.Optional[str] = OMIT,
+        input_video: typing.Optional[str] = OMIT,
+        selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT,
+        selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        scale : int
+            The final upsampling scale of the image
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        input_image : typing.Optional[str]
+            Input Image
+
+        input_video : typing.Optional[str]
+            Input Video
+
+        selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]
+
+        selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.compare_ai_image_upscalers.async_compare_ai_upscalers(
+                scale=1,
+            )
+
+
+        asyncio.run(main())
+        """
+        # Submits the run without waiting for the result; poll it later with
+        # status_compare_ai_upscalers(). OMIT-valued fields are stripped (omit=OMIT).
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/compare-ai-upscalers/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "input_image": input_image,
+                "input_video": input_video,
+                "scale": scale,
+                "selected_models": selected_models,
+                "selected_bg_model": selected_bg_model,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_compare_ai_upscalers(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> CompareUpscalerPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        CompareUpscalerPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.compare_ai_image_upscalers.status_compare_ai_upscalers(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        # Poll the status of a previously submitted run; run_id travels as a
+        # query parameter on a GET.
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/compare-ai-upscalers/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(CompareUpscalerPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/compare_ai_translations/__init__.py b/src/gooey/compare_ai_translations/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/compare_ai_translations/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/compare_ai_translations/client.py b/src/gooey/compare_ai_translations/client.py
new file mode 100644
index 0000000..1adf7cc
--- /dev/null
+++ b/src/gooey/compare_ai_translations/client.py
@@ -0,0 +1,507 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.translation_page_request_selected_model import TranslationPageRequestSelectedModel
+from ..types.translation_page_response import TranslationPageResponse
+from ..types.translation_page_status_response import TranslationPageStatusResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class CompareAiTranslationsClient:
+    """Synchronous client for the "Compare AI Translations" recipe endpoints.
+
+    Exposes a blocking run (``v2/translate/``), an asynchronous job submission
+    (``v3/translate/async/``) and a status poll for a submitted run
+    (``v3/translate/status/``).
+    """
+
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def translate(
+        self,
+        *,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        texts: typing.Optional[typing.Sequence[str]] = OMIT,
+        selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT,
+        translation_source: typing.Optional[str] = OMIT,
+        translation_target: typing.Optional[str] = OMIT,
+        glossary_document: typing.Optional[str] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> TranslationPageResponse:
+        """
+        Parameters
+        ----------
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        texts : typing.Optional[typing.Sequence[str]]
+
+        selected_model : typing.Optional[TranslationPageRequestSelectedModel]
+
+        translation_source : typing.Optional[str]
+
+        translation_target : typing.Optional[str]
+
+        glossary_document : typing.Optional[str]
+            Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+            If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        TranslationPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.compare_ai_translations.translate()
+        """
+        # Parameters still equal to the OMIT sentinel are stripped from the JSON
+        # body by the client wrapper (via omit=OMIT).
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/translate/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "texts": texts,
+                "selected_model": selected_model,
+                "translation_source": translation_source,
+                "translation_target": translation_target,
+                "glossary_document": glossary_document,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses to typed exceptions; any other status,
+        # or a non-JSON error body, falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(TranslationPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_translate(
+        self,
+        *,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        texts: typing.Optional[typing.Sequence[str]] = OMIT,
+        selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT,
+        translation_source: typing.Optional[str] = OMIT,
+        translation_target: typing.Optional[str] = OMIT,
+        glossary_document: typing.Optional[str] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        texts : typing.Optional[typing.Sequence[str]]
+
+        selected_model : typing.Optional[TranslationPageRequestSelectedModel]
+
+        translation_source : typing.Optional[str]
+
+        translation_target : typing.Optional[str]
+
+        glossary_document : typing.Optional[str]
+            Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+            If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.compare_ai_translations.async_translate()
+        """
+        # Submits the run without waiting for the result; poll it later with
+        # status_translate(). OMIT-valued fields are stripped (omit=OMIT).
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/translate/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "texts": texts,
+                "selected_model": selected_model,
+                "translation_source": translation_source,
+                "translation_target": translation_target,
+                "glossary_document": glossary_document,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def status_translate(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> TranslationPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        TranslationPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.compare_ai_translations.status_translate(
+            run_id="run_id",
+        )
+        """
+        # Poll the status of a previously submitted run; run_id travels as a
+        # query parameter on a GET.
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/translate/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(TranslationPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncCompareAiTranslationsClient:
+    """Asyncio client for the "Compare AI Translations" recipe endpoints.
+
+    Mirrors CompareAiTranslationsClient method-for-method; the only difference
+    is that each HTTP call is awaited on the async client wrapper.
+    """
+
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def translate(
+        self,
+        *,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        texts: typing.Optional[typing.Sequence[str]] = OMIT,
+        selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT,
+        translation_source: typing.Optional[str] = OMIT,
+        translation_target: typing.Optional[str] = OMIT,
+        glossary_document: typing.Optional[str] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> TranslationPageResponse:
+        """
+        Parameters
+        ----------
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        texts : typing.Optional[typing.Sequence[str]]
+
+        selected_model : typing.Optional[TranslationPageRequestSelectedModel]
+
+        translation_source : typing.Optional[str]
+
+        translation_target : typing.Optional[str]
+
+        glossary_document : typing.Optional[str]
+            Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+            If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        TranslationPageResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.compare_ai_translations.translate()
+
+
+        asyncio.run(main())
+        """
+        # Parameters still equal to the OMIT sentinel are stripped from the JSON
+        # body by the client wrapper (via omit=OMIT).
+        _response = await self._client_wrapper.httpx_client.request(
+            "v2/translate/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "texts": texts,
+                "selected_model": selected_model,
+                "translation_source": translation_source,
+                "translation_target": translation_target,
+                "glossary_document": glossary_document,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses to typed exceptions; any other status,
+        # or a non-JSON error body, falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(TranslationPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_translate(
+        self,
+        *,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        texts: typing.Optional[typing.Sequence[str]] = OMIT,
+        selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT,
+        translation_source: typing.Optional[str] = OMIT,
+        translation_target: typing.Optional[str] = OMIT,
+        glossary_document: typing.Optional[str] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        texts : typing.Optional[typing.Sequence[str]]
+
+        selected_model : typing.Optional[TranslationPageRequestSelectedModel]
+
+        translation_source : typing.Optional[str]
+
+        translation_target : typing.Optional[str]
+
+        glossary_document : typing.Optional[str]
+            Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+            If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.compare_ai_translations.async_translate()
+
+
+        asyncio.run(main())
+        """
+        # Submits the run without waiting for the result; poll it later with
+        # status_translate(). OMIT-valued fields are stripped (omit=OMIT).
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/translate/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "texts": texts,
+                "selected_model": selected_model,
+                "translation_source": translation_source,
+                "translation_target": translation_target,
+                "glossary_document": glossary_document,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_translate(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> TranslationPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        TranslationPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.compare_ai_translations.status_translate(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        # Poll the status of a previously submitted run; run_id travels as a
+        # query parameter on a GET.
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/translate/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(TranslationPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/compare_ai_voice_generators/__init__.py b/src/gooey/compare_ai_voice_generators/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/compare_ai_voice_generators/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/compare_ai_voice_generators/client.py b/src/gooey/compare_ai_voice_generators/client.py
new file mode 100644
index 0000000..44604f6
--- /dev/null
+++ b/src/gooey/compare_ai_voice_generators/client.py
@@ -0,0 +1,737 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
+from ..types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
+from ..types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
+from ..types.text_to_speech_page_response import TextToSpeechPageResponse
+from ..types.text_to_speech_page_status_response import TextToSpeechPageStatusResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
class CompareAiVoiceGeneratorsClient:
    """Synchronous client for the "Compare AI Voice Generators" text-to-speech endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _parse_response(
        self, _response: typing.Any, success_type: typing.Any, *, has_server_error: bool = False
    ) -> typing.Any:
        """
        Map an HTTP response onto a parsed success model or a typed exception.

        2xx -> ``success_type``; 402 -> PaymentRequiredError;
        422 -> UnprocessableEntityError; 429 -> TooManyRequestsError;
        500 -> InternalServerError (only when ``has_server_error`` is set);
        anything else -> ApiError. A non-JSON body always becomes an ApiError
        carrying the raw response text.
        """
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(success_type, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if has_server_error and _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def text_to_speech(
        self,
        *,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT,
        uberduck_voice_name: typing.Optional[str] = OMIT,
        uberduck_speaking_rate: typing.Optional[float] = OMIT,
        google_voice_name: typing.Optional[str] = OMIT,
        google_speaking_rate: typing.Optional[float] = OMIT,
        google_pitch: typing.Optional[float] = OMIT,
        bark_history_prompt: typing.Optional[str] = OMIT,
        elevenlabs_voice_name: typing.Optional[str] = OMIT,
        elevenlabs_api_key: typing.Optional[str] = OMIT,
        elevenlabs_voice_id: typing.Optional[str] = OMIT,
        elevenlabs_model: typing.Optional[str] = OMIT,
        elevenlabs_stability: typing.Optional[float] = OMIT,
        elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
        elevenlabs_style: typing.Optional[float] = OMIT,
        elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
        azure_voice_name: typing.Optional[str] = OMIT,
        openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT,
        openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> TextToSpeechPageResponse:
        """
        Run text-to-speech and wait for the completed result.

        Parameters
        ----------
        text_prompt : str
            Text to synthesize.
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        The remaining keyword arguments select and tune the TTS provider
        (Uberduck, Google, Bark, ElevenLabs, Azure, OpenAI); arguments left at
        the OMIT sentinel are dropped from the request payload.
        ``elevenlabs_voice_name`` is deprecated — use ``elevenlabs_voice_id``.

        Returns
        -------
        TextToSpeechPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.compare_ai_voice_generators.text_to_speech(
            text_prompt="text_prompt",
        )
        """
        _json_body = dict(
            functions=functions,
            variables=variables,
            text_prompt=text_prompt,
            tts_provider=tts_provider,
            uberduck_voice_name=uberduck_voice_name,
            uberduck_speaking_rate=uberduck_speaking_rate,
            google_voice_name=google_voice_name,
            google_speaking_rate=google_speaking_rate,
            google_pitch=google_pitch,
            bark_history_prompt=bark_history_prompt,
            elevenlabs_voice_name=elevenlabs_voice_name,
            elevenlabs_api_key=elevenlabs_api_key,
            elevenlabs_voice_id=elevenlabs_voice_id,
            elevenlabs_model=elevenlabs_model,
            elevenlabs_stability=elevenlabs_stability,
            elevenlabs_similarity_boost=elevenlabs_similarity_boost,
            elevenlabs_style=elevenlabs_style,
            elevenlabs_speaker_boost=elevenlabs_speaker_boost,
            azure_voice_name=azure_voice_name,
            openai_voice_name=openai_voice_name,
            openai_tts_model=openai_tts_model,
            settings=settings,
        )
        _response = self._client_wrapper.httpx_client.request(
            "v2/TextToSpeech/", method="POST", json=_json_body, request_options=request_options, omit=OMIT
        )
        # The synchronous run endpoint can additionally report a 500 failure body.
        return self._parse_response(_response, TextToSpeechPageResponse, has_server_error=True)

    def async_text_to_speech(
        self,
        *,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT,
        uberduck_voice_name: typing.Optional[str] = OMIT,
        uberduck_speaking_rate: typing.Optional[float] = OMIT,
        google_voice_name: typing.Optional[str] = OMIT,
        google_speaking_rate: typing.Optional[float] = OMIT,
        google_pitch: typing.Optional[float] = OMIT,
        bark_history_prompt: typing.Optional[str] = OMIT,
        elevenlabs_voice_name: typing.Optional[str] = OMIT,
        elevenlabs_api_key: typing.Optional[str] = OMIT,
        elevenlabs_voice_id: typing.Optional[str] = OMIT,
        elevenlabs_model: typing.Optional[str] = OMIT,
        elevenlabs_stability: typing.Optional[float] = OMIT,
        elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
        elevenlabs_style: typing.Optional[float] = OMIT,
        elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
        azure_voice_name: typing.Optional[str] = OMIT,
        openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT,
        openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start a text-to-speech run without waiting for it; poll with
        ``status_text_to_speech`` using the returned run id.

        Parameters
        ----------
        text_prompt : str
            Text to synthesize.
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        The remaining keyword arguments select and tune the TTS provider
        (Uberduck, Google, Bark, ElevenLabs, Azure, OpenAI); arguments left at
        the OMIT sentinel are dropped from the request payload.
        ``elevenlabs_voice_name`` is deprecated — use ``elevenlabs_voice_id``.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.compare_ai_voice_generators.async_text_to_speech(
            text_prompt="text_prompt",
        )
        """
        _json_body = dict(
            functions=functions,
            variables=variables,
            text_prompt=text_prompt,
            tts_provider=tts_provider,
            uberduck_voice_name=uberduck_voice_name,
            uberduck_speaking_rate=uberduck_speaking_rate,
            google_voice_name=google_voice_name,
            google_speaking_rate=google_speaking_rate,
            google_pitch=google_pitch,
            bark_history_prompt=bark_history_prompt,
            elevenlabs_voice_name=elevenlabs_voice_name,
            elevenlabs_api_key=elevenlabs_api_key,
            elevenlabs_voice_id=elevenlabs_voice_id,
            elevenlabs_model=elevenlabs_model,
            elevenlabs_stability=elevenlabs_stability,
            elevenlabs_similarity_boost=elevenlabs_similarity_boost,
            elevenlabs_style=elevenlabs_style,
            elevenlabs_speaker_boost=elevenlabs_speaker_boost,
            azure_voice_name=azure_voice_name,
            openai_voice_name=openai_voice_name,
            openai_tts_model=openai_tts_model,
            settings=settings,
        )
        _response = self._client_wrapper.httpx_client.request(
            "v3/TextToSpeech/async/", method="POST", json=_json_body, request_options=request_options, omit=OMIT
        )
        return self._parse_response(_response, AsyncApiResponseModelV3)

    def status_text_to_speech(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> TextToSpeechPageStatusResponse:
        """
        Poll the status of a text-to-speech run started via ``async_text_to_speech``.

        Parameters
        ----------
        run_id : str
            Identifier of the run to check.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        TextToSpeechPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.compare_ai_voice_generators.status_text_to_speech(
            run_id="run_id",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v3/TextToSpeech/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._parse_response(_response, TextToSpeechPageStatusResponse)
+
+
class AsyncCompareAiVoiceGeneratorsClient:
    """Asynchronous client for the "Compare AI Voice Generators" text-to-speech endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _parse_response(
        self, _response: typing.Any, success_type: typing.Any, *, has_server_error: bool = False
    ) -> typing.Any:
        """
        Map an HTTP response onto a parsed success model or a typed exception.

        2xx -> ``success_type``; 402 -> PaymentRequiredError;
        422 -> UnprocessableEntityError; 429 -> TooManyRequestsError;
        500 -> InternalServerError (only when ``has_server_error`` is set);
        anything else -> ApiError. A non-JSON body always becomes an ApiError
        carrying the raw response text.
        """
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(success_type, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if has_server_error and _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def text_to_speech(
        self,
        *,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT,
        uberduck_voice_name: typing.Optional[str] = OMIT,
        uberduck_speaking_rate: typing.Optional[float] = OMIT,
        google_voice_name: typing.Optional[str] = OMIT,
        google_speaking_rate: typing.Optional[float] = OMIT,
        google_pitch: typing.Optional[float] = OMIT,
        bark_history_prompt: typing.Optional[str] = OMIT,
        elevenlabs_voice_name: typing.Optional[str] = OMIT,
        elevenlabs_api_key: typing.Optional[str] = OMIT,
        elevenlabs_voice_id: typing.Optional[str] = OMIT,
        elevenlabs_model: typing.Optional[str] = OMIT,
        elevenlabs_stability: typing.Optional[float] = OMIT,
        elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
        elevenlabs_style: typing.Optional[float] = OMIT,
        elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
        azure_voice_name: typing.Optional[str] = OMIT,
        openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT,
        openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> TextToSpeechPageResponse:
        """
        Run text-to-speech and await the completed result.

        Parameters
        ----------
        text_prompt : str
            Text to synthesize.
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        The remaining keyword arguments select and tune the TTS provider
        (Uberduck, Google, Bark, ElevenLabs, Azure, OpenAI); arguments left at
        the OMIT sentinel are dropped from the request payload.
        ``elevenlabs_voice_name`` is deprecated — use ``elevenlabs_voice_id``.

        Returns
        -------
        TextToSpeechPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.compare_ai_voice_generators.text_to_speech(
                text_prompt="text_prompt",
            )


        asyncio.run(main())
        """
        _json_body = dict(
            functions=functions,
            variables=variables,
            text_prompt=text_prompt,
            tts_provider=tts_provider,
            uberduck_voice_name=uberduck_voice_name,
            uberduck_speaking_rate=uberduck_speaking_rate,
            google_voice_name=google_voice_name,
            google_speaking_rate=google_speaking_rate,
            google_pitch=google_pitch,
            bark_history_prompt=bark_history_prompt,
            elevenlabs_voice_name=elevenlabs_voice_name,
            elevenlabs_api_key=elevenlabs_api_key,
            elevenlabs_voice_id=elevenlabs_voice_id,
            elevenlabs_model=elevenlabs_model,
            elevenlabs_stability=elevenlabs_stability,
            elevenlabs_similarity_boost=elevenlabs_similarity_boost,
            elevenlabs_style=elevenlabs_style,
            elevenlabs_speaker_boost=elevenlabs_speaker_boost,
            azure_voice_name=azure_voice_name,
            openai_voice_name=openai_voice_name,
            openai_tts_model=openai_tts_model,
            settings=settings,
        )
        _response = await self._client_wrapper.httpx_client.request(
            "v2/TextToSpeech/", method="POST", json=_json_body, request_options=request_options, omit=OMIT
        )
        # The synchronous run endpoint can additionally report a 500 failure body.
        return self._parse_response(_response, TextToSpeechPageResponse, has_server_error=True)

    async def async_text_to_speech(
        self,
        *,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT,
        uberduck_voice_name: typing.Optional[str] = OMIT,
        uberduck_speaking_rate: typing.Optional[float] = OMIT,
        google_voice_name: typing.Optional[str] = OMIT,
        google_speaking_rate: typing.Optional[float] = OMIT,
        google_pitch: typing.Optional[float] = OMIT,
        bark_history_prompt: typing.Optional[str] = OMIT,
        elevenlabs_voice_name: typing.Optional[str] = OMIT,
        elevenlabs_api_key: typing.Optional[str] = OMIT,
        elevenlabs_voice_id: typing.Optional[str] = OMIT,
        elevenlabs_model: typing.Optional[str] = OMIT,
        elevenlabs_stability: typing.Optional[float] = OMIT,
        elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
        elevenlabs_style: typing.Optional[float] = OMIT,
        elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
        azure_voice_name: typing.Optional[str] = OMIT,
        openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT,
        openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start a text-to-speech run without waiting for it; poll with
        ``status_text_to_speech`` using the returned run id.

        Parameters
        ----------
        text_prompt : str
            Text to synthesize.
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        The remaining keyword arguments select and tune the TTS provider
        (Uberduck, Google, Bark, ElevenLabs, Azure, OpenAI); arguments left at
        the OMIT sentinel are dropped from the request payload.
        ``elevenlabs_voice_name`` is deprecated — use ``elevenlabs_voice_id``.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.compare_ai_voice_generators.async_text_to_speech(
                text_prompt="text_prompt",
            )


        asyncio.run(main())
        """
        _json_body = dict(
            functions=functions,
            variables=variables,
            text_prompt=text_prompt,
            tts_provider=tts_provider,
            uberduck_voice_name=uberduck_voice_name,
            uberduck_speaking_rate=uberduck_speaking_rate,
            google_voice_name=google_voice_name,
            google_speaking_rate=google_speaking_rate,
            google_pitch=google_pitch,
            bark_history_prompt=bark_history_prompt,
            elevenlabs_voice_name=elevenlabs_voice_name,
            elevenlabs_api_key=elevenlabs_api_key,
            elevenlabs_voice_id=elevenlabs_voice_id,
            elevenlabs_model=elevenlabs_model,
            elevenlabs_stability=elevenlabs_stability,
            elevenlabs_similarity_boost=elevenlabs_similarity_boost,
            elevenlabs_style=elevenlabs_style,
            elevenlabs_speaker_boost=elevenlabs_speaker_boost,
            azure_voice_name=azure_voice_name,
            openai_voice_name=openai_voice_name,
            openai_tts_model=openai_tts_model,
            settings=settings,
        )
        _response = await self._client_wrapper.httpx_client.request(
            "v3/TextToSpeech/async/", method="POST", json=_json_body, request_options=request_options, omit=OMIT
        )
        return self._parse_response(_response, AsyncApiResponseModelV3)

    async def status_text_to_speech(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> TextToSpeechPageStatusResponse:
        """
        Poll the status of a text-to-speech run started via ``async_text_to_speech``.

        Parameters
        ----------
        run_id : str
            Identifier of the run to check.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        TextToSpeechPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.compare_ai_voice_generators.status_text_to_speech(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/TextToSpeech/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._parse_response(_response, TextToSpeechPageStatusResponse)
diff --git a/src/gooey/copilot_for_your_enterprise/__init__.py b/src/gooey/copilot_for_your_enterprise/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/copilot_for_your_enterprise/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/copilot_for_your_enterprise/client.py b/src/gooey/copilot_for_your_enterprise/client.py
new file mode 100644
index 0000000..20c1ea6
--- /dev/null
+++ b/src/gooey/copilot_for_your_enterprise/client.py
@@ -0,0 +1,1386 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.conversation_entry import ConversationEntry
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.llm_tools import LlmTools
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.sad_talker_settings import SadTalkerSettings
+from ..types.video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
+from ..types.video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
+from ..types.video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
+from ..types.video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from ..types.video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
+from ..types.video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
+from ..types.video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel
+from ..types.video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
+from ..types.video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
+from ..types.video_bots_page_response import VideoBotsPageResponse
+from ..types.video_bots_page_status_response import VideoBotsPageStatusResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class CopilotForYourEnterpriseClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def video_bots(
+ self,
+ *,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ input_prompt: typing.Optional[str] = OMIT,
+ input_audio: typing.Optional[str] = OMIT,
+ input_images: typing.Optional[typing.Sequence[str]] = OMIT,
+ input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ doc_extract_url: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
+ bot_script: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT,
+ document_model: typing.Optional[str] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ task_instructions: typing.Optional[str] = OMIT,
+ query_instructions: typing.Optional[str] = OMIT,
+ keyword_instructions: typing.Optional[str] = OMIT,
+ documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ max_references: typing.Optional[int] = OMIT,
+ max_context_words: typing.Optional[int] = OMIT,
+ scroll_jump: typing.Optional[int] = OMIT,
+ embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT,
+ dense_weight: typing.Optional[float] = OMIT,
+ citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT,
+ use_url_shortener: typing.Optional[bool] = OMIT,
+ asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT,
+ asr_language: typing.Optional[str] = OMIT,
+ translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT,
+ user_language: typing.Optional[str] = OMIT,
+ input_glossary_document: typing.Optional[str] = OMIT,
+ output_glossary_document: typing.Optional[str] = OMIT,
+ lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT,
+ tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
+ tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT,
+ uberduck_voice_name: typing.Optional[str] = OMIT,
+ uberduck_speaking_rate: typing.Optional[float] = OMIT,
+ google_voice_name: typing.Optional[str] = OMIT,
+ google_speaking_rate: typing.Optional[float] = OMIT,
+ google_pitch: typing.Optional[float] = OMIT,
+ bark_history_prompt: typing.Optional[str] = OMIT,
+ elevenlabs_voice_name: typing.Optional[str] = OMIT,
+ elevenlabs_api_key: typing.Optional[str] = OMIT,
+ elevenlabs_voice_id: typing.Optional[str] = OMIT,
+ elevenlabs_model: typing.Optional[str] = OMIT,
+ elevenlabs_stability: typing.Optional[float] = OMIT,
+ elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
+ elevenlabs_style: typing.Optional[float] = OMIT,
+ elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
+ azure_voice_name: typing.Optional[str] = OMIT,
+ openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT,
+ openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT,
+ input_face: typing.Optional[str] = OMIT,
+ face_padding_top: typing.Optional[int] = OMIT,
+ face_padding_bottom: typing.Optional[int] = OMIT,
+ face_padding_left: typing.Optional[int] = OMIT,
+ face_padding_right: typing.Optional[int] = OMIT,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> VideoBotsPageResponse:
+ """
+ Parameters
+ ----------
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ input_audio : typing.Optional[str]
+
+ input_images : typing.Optional[typing.Sequence[str]]
+
+ input_documents : typing.Optional[typing.Sequence[str]]
+
+ doc_extract_url : typing.Optional[str]
+ Select a workflow to extract text from documents and images.
+
+ messages : typing.Optional[typing.Sequence[ConversationEntry]]
+
+ bot_script : typing.Optional[str]
+
+ selected_model : typing.Optional[VideoBotsPageRequestSelectedModel]
+
+ document_model : typing.Optional[str]
+            When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ keyword_instructions : typing.Optional[str]
+
+ documents : typing.Optional[typing.Sequence[str]]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+ citation_style : typing.Optional[VideoBotsPageRequestCitationStyle]
+
+ use_url_shortener : typing.Optional[bool]
+
+ asr_model : typing.Optional[VideoBotsPageRequestAsrModel]
+ Choose a model to transcribe incoming audio messages to text.
+
+ asr_language : typing.Optional[str]
+ Choose a language to transcribe incoming audio messages to text.
+
+ translation_model : typing.Optional[VideoBotsPageRequestTranslationModel]
+
+ user_language : typing.Optional[str]
+ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+ input_glossary_document : typing.Optional[str]
+            Translation Glossary for User Language -> LLM Language (English)
+
+ output_glossary_document : typing.Optional[str]
+            Translation Glossary for LLM Language (English) -> User Language
+
+ lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel]
+
+ tools : typing.Optional[typing.Sequence[LlmTools]]
+ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+ tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider]
+
+ uberduck_voice_name : typing.Optional[str]
+
+ uberduck_speaking_rate : typing.Optional[float]
+
+ google_voice_name : typing.Optional[str]
+
+ google_speaking_rate : typing.Optional[float]
+
+ google_pitch : typing.Optional[float]
+
+ bark_history_prompt : typing.Optional[str]
+
+ elevenlabs_voice_name : typing.Optional[str]
+ Use `elevenlabs_voice_id` instead
+
+ elevenlabs_api_key : typing.Optional[str]
+
+ elevenlabs_voice_id : typing.Optional[str]
+
+ elevenlabs_model : typing.Optional[str]
+
+ elevenlabs_stability : typing.Optional[float]
+
+ elevenlabs_similarity_boost : typing.Optional[float]
+
+ elevenlabs_style : typing.Optional[float]
+
+ elevenlabs_speaker_boost : typing.Optional[bool]
+
+ azure_voice_name : typing.Optional[str]
+
+ openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName]
+
+ openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel]
+
+ input_face : typing.Optional[str]
+
+ face_padding_top : typing.Optional[int]
+
+ face_padding_bottom : typing.Optional[int]
+
+ face_padding_left : typing.Optional[int]
+
+ face_padding_right : typing.Optional[int]
+
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ VideoBotsPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.copilot_for_your_enterprise.video_bots()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v2/video-bots/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "input_audio": input_audio,
+ "input_images": input_images,
+ "input_documents": input_documents,
+ "doc_extract_url": doc_extract_url,
+ "messages": messages,
+ "bot_script": bot_script,
+ "selected_model": selected_model,
+ "document_model": document_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "keyword_instructions": keyword_instructions,
+ "documents": documents,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "citation_style": citation_style,
+ "use_url_shortener": use_url_shortener,
+ "asr_model": asr_model,
+ "asr_language": asr_language,
+ "translation_model": translation_model,
+ "user_language": user_language,
+ "input_glossary_document": input_glossary_document,
+ "output_glossary_document": output_glossary_document,
+ "lipsync_model": lipsync_model,
+ "tools": tools,
+ "tts_provider": tts_provider,
+ "uberduck_voice_name": uberduck_voice_name,
+ "uberduck_speaking_rate": uberduck_speaking_rate,
+ "google_voice_name": google_voice_name,
+ "google_speaking_rate": google_speaking_rate,
+ "google_pitch": google_pitch,
+ "bark_history_prompt": bark_history_prompt,
+ "elevenlabs_voice_name": elevenlabs_voice_name,
+ "elevenlabs_api_key": elevenlabs_api_key,
+ "elevenlabs_voice_id": elevenlabs_voice_id,
+ "elevenlabs_model": elevenlabs_model,
+ "elevenlabs_stability": elevenlabs_stability,
+ "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+ "elevenlabs_style": elevenlabs_style,
+ "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+ "azure_voice_name": azure_voice_name,
+ "openai_voice_name": openai_voice_name,
+ "openai_tts_model": openai_tts_model,
+ "input_face": input_face,
+ "face_padding_top": face_padding_top,
+ "face_padding_bottom": face_padding_bottom,
+ "face_padding_left": face_padding_left,
+ "face_padding_right": face_padding_right,
+ "sadtalker_settings": sadtalker_settings,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(VideoBotsPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def async_video_bots(
+ self,
+ *,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ input_prompt: typing.Optional[str] = OMIT,
+ input_audio: typing.Optional[str] = OMIT,
+ input_images: typing.Optional[typing.Sequence[str]] = OMIT,
+ input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ doc_extract_url: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
+ bot_script: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT,
+ document_model: typing.Optional[str] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ task_instructions: typing.Optional[str] = OMIT,
+ query_instructions: typing.Optional[str] = OMIT,
+ keyword_instructions: typing.Optional[str] = OMIT,
+ documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ max_references: typing.Optional[int] = OMIT,
+ max_context_words: typing.Optional[int] = OMIT,
+ scroll_jump: typing.Optional[int] = OMIT,
+ embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT,
+ dense_weight: typing.Optional[float] = OMIT,
+ citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT,
+ use_url_shortener: typing.Optional[bool] = OMIT,
+ asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT,
+ asr_language: typing.Optional[str] = OMIT,
+ translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT,
+ user_language: typing.Optional[str] = OMIT,
+ input_glossary_document: typing.Optional[str] = OMIT,
+ output_glossary_document: typing.Optional[str] = OMIT,
+ lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT,
+ tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
+ tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT,
+ uberduck_voice_name: typing.Optional[str] = OMIT,
+ uberduck_speaking_rate: typing.Optional[float] = OMIT,
+ google_voice_name: typing.Optional[str] = OMIT,
+ google_speaking_rate: typing.Optional[float] = OMIT,
+ google_pitch: typing.Optional[float] = OMIT,
+ bark_history_prompt: typing.Optional[str] = OMIT,
+ elevenlabs_voice_name: typing.Optional[str] = OMIT,
+ elevenlabs_api_key: typing.Optional[str] = OMIT,
+ elevenlabs_voice_id: typing.Optional[str] = OMIT,
+ elevenlabs_model: typing.Optional[str] = OMIT,
+ elevenlabs_stability: typing.Optional[float] = OMIT,
+ elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
+ elevenlabs_style: typing.Optional[float] = OMIT,
+ elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
+ azure_voice_name: typing.Optional[str] = OMIT,
+ openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT,
+ openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT,
+ input_face: typing.Optional[str] = OMIT,
+ face_padding_top: typing.Optional[int] = OMIT,
+ face_padding_bottom: typing.Optional[int] = OMIT,
+ face_padding_left: typing.Optional[int] = OMIT,
+ face_padding_right: typing.Optional[int] = OMIT,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ input_audio : typing.Optional[str]
+
+ input_images : typing.Optional[typing.Sequence[str]]
+
+ input_documents : typing.Optional[typing.Sequence[str]]
+
+ doc_extract_url : typing.Optional[str]
+ Select a workflow to extract text from documents and images.
+
+ messages : typing.Optional[typing.Sequence[ConversationEntry]]
+
+ bot_script : typing.Optional[str]
+
+ selected_model : typing.Optional[VideoBotsPageRequestSelectedModel]
+
+ document_model : typing.Optional[str]
+            When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ keyword_instructions : typing.Optional[str]
+
+ documents : typing.Optional[typing.Sequence[str]]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+ citation_style : typing.Optional[VideoBotsPageRequestCitationStyle]
+
+ use_url_shortener : typing.Optional[bool]
+
+ asr_model : typing.Optional[VideoBotsPageRequestAsrModel]
+ Choose a model to transcribe incoming audio messages to text.
+
+ asr_language : typing.Optional[str]
+ Choose a language to transcribe incoming audio messages to text.
+
+ translation_model : typing.Optional[VideoBotsPageRequestTranslationModel]
+
+ user_language : typing.Optional[str]
+ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+ input_glossary_document : typing.Optional[str]
+            Translation Glossary for User Language -> LLM Language (English)
+
+ output_glossary_document : typing.Optional[str]
+            Translation Glossary for LLM Language (English) -> User Language
+
+ lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel]
+
+ tools : typing.Optional[typing.Sequence[LlmTools]]
+ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+ tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider]
+
+ uberduck_voice_name : typing.Optional[str]
+
+ uberduck_speaking_rate : typing.Optional[float]
+
+ google_voice_name : typing.Optional[str]
+
+ google_speaking_rate : typing.Optional[float]
+
+ google_pitch : typing.Optional[float]
+
+ bark_history_prompt : typing.Optional[str]
+
+ elevenlabs_voice_name : typing.Optional[str]
+ Use `elevenlabs_voice_id` instead
+
+ elevenlabs_api_key : typing.Optional[str]
+
+ elevenlabs_voice_id : typing.Optional[str]
+
+ elevenlabs_model : typing.Optional[str]
+
+ elevenlabs_stability : typing.Optional[float]
+
+ elevenlabs_similarity_boost : typing.Optional[float]
+
+ elevenlabs_style : typing.Optional[float]
+
+ elevenlabs_speaker_boost : typing.Optional[bool]
+
+ azure_voice_name : typing.Optional[str]
+
+ openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName]
+
+ openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel]
+
+ input_face : typing.Optional[str]
+
+ face_padding_top : typing.Optional[int]
+
+ face_padding_bottom : typing.Optional[int]
+
+ face_padding_left : typing.Optional[int]
+
+ face_padding_right : typing.Optional[int]
+
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.copilot_for_your_enterprise.async_video_bots()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/video-bots/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "input_audio": input_audio,
+ "input_images": input_images,
+ "input_documents": input_documents,
+ "doc_extract_url": doc_extract_url,
+ "messages": messages,
+ "bot_script": bot_script,
+ "selected_model": selected_model,
+ "document_model": document_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "keyword_instructions": keyword_instructions,
+ "documents": documents,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "citation_style": citation_style,
+ "use_url_shortener": use_url_shortener,
+ "asr_model": asr_model,
+ "asr_language": asr_language,
+ "translation_model": translation_model,
+ "user_language": user_language,
+ "input_glossary_document": input_glossary_document,
+ "output_glossary_document": output_glossary_document,
+ "lipsync_model": lipsync_model,
+ "tools": tools,
+ "tts_provider": tts_provider,
+ "uberduck_voice_name": uberduck_voice_name,
+ "uberduck_speaking_rate": uberduck_speaking_rate,
+ "google_voice_name": google_voice_name,
+ "google_speaking_rate": google_speaking_rate,
+ "google_pitch": google_pitch,
+ "bark_history_prompt": bark_history_prompt,
+ "elevenlabs_voice_name": elevenlabs_voice_name,
+ "elevenlabs_api_key": elevenlabs_api_key,
+ "elevenlabs_voice_id": elevenlabs_voice_id,
+ "elevenlabs_model": elevenlabs_model,
+ "elevenlabs_stability": elevenlabs_stability,
+ "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+ "elevenlabs_style": elevenlabs_style,
+ "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+ "azure_voice_name": azure_voice_name,
+ "openai_voice_name": openai_voice_name,
+ "openai_tts_model": openai_tts_model,
+ "input_face": input_face,
+ "face_padding_top": face_padding_top,
+ "face_padding_bottom": face_padding_bottom,
+ "face_padding_left": face_padding_left,
+ "face_padding_right": face_padding_right,
+ "sadtalker_settings": sadtalker_settings,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def status_video_bots(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> VideoBotsPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ VideoBotsPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.copilot_for_your_enterprise.status_video_bots(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/video-bots/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(VideoBotsPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncCopilotForYourEnterpriseClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def video_bots(
+ self,
+ *,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ input_prompt: typing.Optional[str] = OMIT,
+ input_audio: typing.Optional[str] = OMIT,
+ input_images: typing.Optional[typing.Sequence[str]] = OMIT,
+ input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ doc_extract_url: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
+ bot_script: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT,
+ document_model: typing.Optional[str] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ task_instructions: typing.Optional[str] = OMIT,
+ query_instructions: typing.Optional[str] = OMIT,
+ keyword_instructions: typing.Optional[str] = OMIT,
+ documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ max_references: typing.Optional[int] = OMIT,
+ max_context_words: typing.Optional[int] = OMIT,
+ scroll_jump: typing.Optional[int] = OMIT,
+ embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT,
+ dense_weight: typing.Optional[float] = OMIT,
+ citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT,
+ use_url_shortener: typing.Optional[bool] = OMIT,
+ asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT,
+ asr_language: typing.Optional[str] = OMIT,
+ translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT,
+ user_language: typing.Optional[str] = OMIT,
+ input_glossary_document: typing.Optional[str] = OMIT,
+ output_glossary_document: typing.Optional[str] = OMIT,
+ lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT,
+ tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
+ tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT,
+ uberduck_voice_name: typing.Optional[str] = OMIT,
+ uberduck_speaking_rate: typing.Optional[float] = OMIT,
+ google_voice_name: typing.Optional[str] = OMIT,
+ google_speaking_rate: typing.Optional[float] = OMIT,
+ google_pitch: typing.Optional[float] = OMIT,
+ bark_history_prompt: typing.Optional[str] = OMIT,
+ elevenlabs_voice_name: typing.Optional[str] = OMIT,
+ elevenlabs_api_key: typing.Optional[str] = OMIT,
+ elevenlabs_voice_id: typing.Optional[str] = OMIT,
+ elevenlabs_model: typing.Optional[str] = OMIT,
+ elevenlabs_stability: typing.Optional[float] = OMIT,
+ elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
+ elevenlabs_style: typing.Optional[float] = OMIT,
+ elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
+ azure_voice_name: typing.Optional[str] = OMIT,
+ openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT,
+ openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT,
+ input_face: typing.Optional[str] = OMIT,
+ face_padding_top: typing.Optional[int] = OMIT,
+ face_padding_bottom: typing.Optional[int] = OMIT,
+ face_padding_left: typing.Optional[int] = OMIT,
+ face_padding_right: typing.Optional[int] = OMIT,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> VideoBotsPageResponse:
+ """
+ Parameters
+ ----------
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ input_audio : typing.Optional[str]
+
+ input_images : typing.Optional[typing.Sequence[str]]
+
+ input_documents : typing.Optional[typing.Sequence[str]]
+
+ doc_extract_url : typing.Optional[str]
+ Select a workflow to extract text from documents and images.
+
+ messages : typing.Optional[typing.Sequence[ConversationEntry]]
+
+ bot_script : typing.Optional[str]
+
+ selected_model : typing.Optional[VideoBotsPageRequestSelectedModel]
+
+ document_model : typing.Optional[str]
+            When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ keyword_instructions : typing.Optional[str]
+
+ documents : typing.Optional[typing.Sequence[str]]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+ citation_style : typing.Optional[VideoBotsPageRequestCitationStyle]
+
+ use_url_shortener : typing.Optional[bool]
+
+ asr_model : typing.Optional[VideoBotsPageRequestAsrModel]
+ Choose a model to transcribe incoming audio messages to text.
+
+ asr_language : typing.Optional[str]
+ Choose a language to transcribe incoming audio messages to text.
+
+ translation_model : typing.Optional[VideoBotsPageRequestTranslationModel]
+
+ user_language : typing.Optional[str]
+ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+ input_glossary_document : typing.Optional[str]
+ Translation Glossary for User Langauge -> LLM Language (English)
+
+ output_glossary_document : typing.Optional[str]
+ Translation Glossary for LLM Language (English) -> User Langauge
+
+ lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel]
+
+ tools : typing.Optional[typing.Sequence[LlmTools]]
+ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+ tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider]
+
+ uberduck_voice_name : typing.Optional[str]
+
+ uberduck_speaking_rate : typing.Optional[float]
+
+ google_voice_name : typing.Optional[str]
+
+ google_speaking_rate : typing.Optional[float]
+
+ google_pitch : typing.Optional[float]
+
+ bark_history_prompt : typing.Optional[str]
+
+ elevenlabs_voice_name : typing.Optional[str]
+ Use `elevenlabs_voice_id` instead
+
+ elevenlabs_api_key : typing.Optional[str]
+
+ elevenlabs_voice_id : typing.Optional[str]
+
+ elevenlabs_model : typing.Optional[str]
+
+ elevenlabs_stability : typing.Optional[float]
+
+ elevenlabs_similarity_boost : typing.Optional[float]
+
+ elevenlabs_style : typing.Optional[float]
+
+ elevenlabs_speaker_boost : typing.Optional[bool]
+
+ azure_voice_name : typing.Optional[str]
+
+ openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName]
+
+ openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel]
+
+ input_face : typing.Optional[str]
+
+ face_padding_top : typing.Optional[int]
+
+ face_padding_bottom : typing.Optional[int]
+
+ face_padding_left : typing.Optional[int]
+
+ face_padding_right : typing.Optional[int]
+
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ VideoBotsPageResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.copilot_for_your_enterprise.video_bots()
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v2/video-bots/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "input_audio": input_audio,
+ "input_images": input_images,
+ "input_documents": input_documents,
+ "doc_extract_url": doc_extract_url,
+ "messages": messages,
+ "bot_script": bot_script,
+ "selected_model": selected_model,
+ "document_model": document_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "keyword_instructions": keyword_instructions,
+ "documents": documents,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "citation_style": citation_style,
+ "use_url_shortener": use_url_shortener,
+ "asr_model": asr_model,
+ "asr_language": asr_language,
+ "translation_model": translation_model,
+ "user_language": user_language,
+ "input_glossary_document": input_glossary_document,
+ "output_glossary_document": output_glossary_document,
+ "lipsync_model": lipsync_model,
+ "tools": tools,
+ "tts_provider": tts_provider,
+ "uberduck_voice_name": uberduck_voice_name,
+ "uberduck_speaking_rate": uberduck_speaking_rate,
+ "google_voice_name": google_voice_name,
+ "google_speaking_rate": google_speaking_rate,
+ "google_pitch": google_pitch,
+ "bark_history_prompt": bark_history_prompt,
+ "elevenlabs_voice_name": elevenlabs_voice_name,
+ "elevenlabs_api_key": elevenlabs_api_key,
+ "elevenlabs_voice_id": elevenlabs_voice_id,
+ "elevenlabs_model": elevenlabs_model,
+ "elevenlabs_stability": elevenlabs_stability,
+ "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+ "elevenlabs_style": elevenlabs_style,
+ "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+ "azure_voice_name": azure_voice_name,
+ "openai_voice_name": openai_voice_name,
+ "openai_tts_model": openai_tts_model,
+ "input_face": input_face,
+ "face_padding_top": face_padding_top,
+ "face_padding_bottom": face_padding_bottom,
+ "face_padding_left": face_padding_left,
+ "face_padding_right": face_padding_right,
+ "sadtalker_settings": sadtalker_settings,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(VideoBotsPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
    async def async_video_bots(
        self,
        *,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        input_audio: typing.Optional[str] = OMIT,
        input_images: typing.Optional[typing.Sequence[str]] = OMIT,
        input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
        doc_extract_url: typing.Optional[str] = OMIT,
        messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
        bot_script: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT,
        document_model: typing.Optional[str] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        keyword_instructions: typing.Optional[str] = OMIT,
        documents: typing.Optional[typing.Sequence[str]] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT,
        use_url_shortener: typing.Optional[bool] = OMIT,
        asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT,
        asr_language: typing.Optional[str] = OMIT,
        translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT,
        user_language: typing.Optional[str] = OMIT,
        input_glossary_document: typing.Optional[str] = OMIT,
        output_glossary_document: typing.Optional[str] = OMIT,
        lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT,
        tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
        tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT,
        uberduck_voice_name: typing.Optional[str] = OMIT,
        uberduck_speaking_rate: typing.Optional[float] = OMIT,
        google_voice_name: typing.Optional[str] = OMIT,
        google_speaking_rate: typing.Optional[float] = OMIT,
        google_pitch: typing.Optional[float] = OMIT,
        bark_history_prompt: typing.Optional[str] = OMIT,
        elevenlabs_voice_name: typing.Optional[str] = OMIT,
        elevenlabs_api_key: typing.Optional[str] = OMIT,
        elevenlabs_voice_id: typing.Optional[str] = OMIT,
        elevenlabs_model: typing.Optional[str] = OMIT,
        elevenlabs_stability: typing.Optional[float] = OMIT,
        elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
        elevenlabs_style: typing.Optional[float] = OMIT,
        elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
        azure_voice_name: typing.Optional[str] = OMIT,
        openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT,
        openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT,
        input_face: typing.Optional[str] = OMIT,
        face_padding_top: typing.Optional[int] = OMIT,
        face_padding_bottom: typing.Optional[int] = OMIT,
        face_padding_left: typing.Optional[int] = OMIT,
        face_padding_right: typing.Optional[int] = OMIT,
        sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Submit a Copilot ("video bots") run to the asynchronous endpoint.

        POSTs the request body to ``v3/video-bots/async/`` and returns an
        ``AsyncApiResponseModelV3`` without waiting for the run to finish.
        NOTE(review): the returned model presumably carries the ``run_id``
        accepted by ``status_video_bots`` for polling — confirm against the
        API reference.

        Parameters
        ----------
        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        input_prompt : typing.Optional[str]

        input_audio : typing.Optional[str]

        input_images : typing.Optional[typing.Sequence[str]]

        input_documents : typing.Optional[typing.Sequence[str]]

        doc_extract_url : typing.Optional[str]
            Select a workflow to extract text from documents and images.

        messages : typing.Optional[typing.Sequence[ConversationEntry]]

        bot_script : typing.Optional[str]

        selected_model : typing.Optional[VideoBotsPageRequestSelectedModel]

        document_model : typing.Optional[str]
            When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        keyword_instructions : typing.Optional[str]

        documents : typing.Optional[typing.Sequence[str]]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        citation_style : typing.Optional[VideoBotsPageRequestCitationStyle]

        use_url_shortener : typing.Optional[bool]

        asr_model : typing.Optional[VideoBotsPageRequestAsrModel]
            Choose a model to transcribe incoming audio messages to text.

        asr_language : typing.Optional[str]
            Choose a language to transcribe incoming audio messages to text.

        translation_model : typing.Optional[VideoBotsPageRequestTranslationModel]

        user_language : typing.Optional[str]
            Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.

        input_glossary_document : typing.Optional[str]
            Translation Glossary for User Language -> LLM Language (English)

        output_glossary_document : typing.Optional[str]
            Translation Glossary for LLM Language (English) -> User Language

        lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel]

        tools : typing.Optional[typing.Sequence[LlmTools]]
            Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).

        tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider]

        uberduck_voice_name : typing.Optional[str]

        uberduck_speaking_rate : typing.Optional[float]

        google_voice_name : typing.Optional[str]

        google_speaking_rate : typing.Optional[float]

        google_pitch : typing.Optional[float]

        bark_history_prompt : typing.Optional[str]

        elevenlabs_voice_name : typing.Optional[str]
            Use `elevenlabs_voice_id` instead

        elevenlabs_api_key : typing.Optional[str]

        elevenlabs_voice_id : typing.Optional[str]

        elevenlabs_model : typing.Optional[str]

        elevenlabs_stability : typing.Optional[float]

        elevenlabs_similarity_boost : typing.Optional[float]

        elevenlabs_style : typing.Optional[float]

        elevenlabs_speaker_boost : typing.Optional[bool]

        azure_voice_name : typing.Optional[str]

        openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName]

        openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel]

        input_face : typing.Optional[str]

        face_padding_top : typing.Optional[int]

        face_padding_bottom : typing.Optional[int]

        face_padding_left : typing.Optional[int]

        face_padding_right : typing.Optional[int]

        sadtalker_settings : typing.Optional[SadTalkerSettings]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.copilot_for_your_enterprise.async_video_bots()


        asyncio.run(main())
        """
        # Parameters still equal to the OMIT sentinel are stripped from the
        # serialized JSON body by the client (see omit=OMIT below), so only
        # explicitly supplied fields are sent.
        _response = await self._client_wrapper.httpx_client.request(
            "v3/video-bots/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "input_prompt": input_prompt,
                "input_audio": input_audio,
                "input_images": input_images,
                "input_documents": input_documents,
                "doc_extract_url": doc_extract_url,
                "messages": messages,
                "bot_script": bot_script,
                "selected_model": selected_model,
                "document_model": document_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "keyword_instructions": keyword_instructions,
                "documents": documents,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "citation_style": citation_style,
                "use_url_shortener": use_url_shortener,
                "asr_model": asr_model,
                "asr_language": asr_language,
                "translation_model": translation_model,
                "user_language": user_language,
                "input_glossary_document": input_glossary_document,
                "output_glossary_document": output_glossary_document,
                "lipsync_model": lipsync_model,
                "tools": tools,
                "tts_provider": tts_provider,
                "uberduck_voice_name": uberduck_voice_name,
                "uberduck_speaking_rate": uberduck_speaking_rate,
                "google_voice_name": google_voice_name,
                "google_speaking_rate": google_speaking_rate,
                "google_pitch": google_pitch,
                "bark_history_prompt": bark_history_prompt,
                "elevenlabs_voice_name": elevenlabs_voice_name,
                "elevenlabs_api_key": elevenlabs_api_key,
                "elevenlabs_voice_id": elevenlabs_voice_id,
                "elevenlabs_model": elevenlabs_model,
                "elevenlabs_stability": elevenlabs_stability,
                "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
                "elevenlabs_style": elevenlabs_style,
                "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
                "azure_voice_name": azure_voice_name,
                "openai_voice_name": openai_voice_name,
                "openai_tts_model": openai_tts_model,
                "input_face": input_face,
                "face_padding_top": face_padding_top,
                "face_padding_bottom": face_padding_bottom,
                "face_padding_left": face_padding_left,
                "face_padding_right": face_padding_right,
                "sadtalker_settings": sadtalker_settings,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # Map the statuses documented for this endpoint to typed exceptions;
        # any other status — or a body that is not valid JSON — falls through
        # to a generic ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON error payload (e.g. an HTML error page): report the raw text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_video_bots(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> VideoBotsPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ VideoBotsPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.copilot_for_your_enterprise.status_video_bots(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/video-bots/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(VideoBotsPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/copilot_integrations/__init__.py b/src/gooey/copilot_integrations/__init__.py
new file mode 100644
index 0000000..3861c31
--- /dev/null
+++ b/src/gooey/copilot_integrations/__init__.py
@@ -0,0 +1,27 @@
# This file was auto-generated by Fern from our API Definition.

# Re-export the request/response types of the copilot_integrations package so
# callers can import them directly from ``gooey.copilot_integrations``.
from .types import (
    CreateStreamRequestAsrModel,
    CreateStreamRequestCitationStyle,
    CreateStreamRequestEmbeddingModel,
    CreateStreamRequestLipsyncModel,
    CreateStreamRequestOpenaiTtsModel,
    CreateStreamRequestOpenaiVoiceName,
    CreateStreamRequestSelectedModel,
    CreateStreamRequestTranslationModel,
    CreateStreamRequestTtsProvider,
    VideoBotsStreamResponse,
)

# Explicit public API of this package (mirrors the re-exports above).
__all__ = [
    "CreateStreamRequestAsrModel",
    "CreateStreamRequestCitationStyle",
    "CreateStreamRequestEmbeddingModel",
    "CreateStreamRequestLipsyncModel",
    "CreateStreamRequestOpenaiTtsModel",
    "CreateStreamRequestOpenaiVoiceName",
    "CreateStreamRequestSelectedModel",
    "CreateStreamRequestTranslationModel",
    "CreateStreamRequestTtsProvider",
    "VideoBotsStreamResponse",
]
diff --git a/src/gooey/copilot_integrations/client.py b/src/gooey/copilot_integrations/client.py
new file mode 100644
index 0000000..c1e95c0
--- /dev/null
+++ b/src/gooey/copilot_integrations/client.py
@@ -0,0 +1,828 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.button_pressed import ButtonPressed
+from ..types.conversation_entry import ConversationEntry
+from ..types.create_stream_response import CreateStreamResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.llm_tools import LlmTools
+from ..types.recipe_function import RecipeFunction
+from ..types.sad_talker_settings import SadTalkerSettings
+from .types.create_stream_request_asr_model import CreateStreamRequestAsrModel
+from .types.create_stream_request_citation_style import CreateStreamRequestCitationStyle
+from .types.create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel
+from .types.create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel
+from .types.create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel
+from .types.create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName
+from .types.create_stream_request_selected_model import CreateStreamRequestSelectedModel
+from .types.create_stream_request_translation_model import CreateStreamRequestTranslationModel
+from .types.create_stream_request_tts_provider import CreateStreamRequestTtsProvider
+from .types.video_bots_stream_response import VideoBotsStreamResponse
+
# Sentinel used as the default value for optional request parameters: fields
# still equal to OMIT when a request is built are dropped from the JSON body
# (the endpoint methods pass ``omit=OMIT`` to the HTTP client for this).
OMIT = typing.cast(typing.Any, ...)
+
+
+class CopilotIntegrationsClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # Hold on to the shared synchronous client wrapper; every endpoint
        # method on this client issues its HTTP requests through it.
        self._client_wrapper = client_wrapper
+
+ def video_bots_stream_create(
+ self,
+ *,
+ integration_id: str,
+ conversation_id: typing.Optional[str] = OMIT,
+ user_id: typing.Optional[str] = OMIT,
+ user_message_id: typing.Optional[str] = OMIT,
+ button_pressed: typing.Optional[ButtonPressed] = OMIT,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ input_prompt: typing.Optional[str] = OMIT,
+ input_audio: typing.Optional[str] = OMIT,
+ input_images: typing.Optional[typing.Sequence[str]] = OMIT,
+ input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ doc_extract_url: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
+ bot_script: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[CreateStreamRequestSelectedModel] = OMIT,
+ document_model: typing.Optional[str] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ task_instructions: typing.Optional[str] = OMIT,
+ query_instructions: typing.Optional[str] = OMIT,
+ keyword_instructions: typing.Optional[str] = OMIT,
+ documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ max_references: typing.Optional[int] = OMIT,
+ max_context_words: typing.Optional[int] = OMIT,
+ scroll_jump: typing.Optional[int] = OMIT,
+ embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = OMIT,
+ dense_weight: typing.Optional[float] = OMIT,
+ citation_style: typing.Optional[CreateStreamRequestCitationStyle] = OMIT,
+ use_url_shortener: typing.Optional[bool] = OMIT,
+ asr_model: typing.Optional[CreateStreamRequestAsrModel] = OMIT,
+ asr_language: typing.Optional[str] = OMIT,
+ translation_model: typing.Optional[CreateStreamRequestTranslationModel] = OMIT,
+ user_language: typing.Optional[str] = OMIT,
+ input_glossary_document: typing.Optional[str] = OMIT,
+ output_glossary_document: typing.Optional[str] = OMIT,
+ lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = OMIT,
+ tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
+ tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = OMIT,
+ uberduck_voice_name: typing.Optional[str] = OMIT,
+ uberduck_speaking_rate: typing.Optional[float] = OMIT,
+ google_voice_name: typing.Optional[str] = OMIT,
+ google_speaking_rate: typing.Optional[float] = OMIT,
+ google_pitch: typing.Optional[float] = OMIT,
+ bark_history_prompt: typing.Optional[str] = OMIT,
+ elevenlabs_voice_name: typing.Optional[str] = OMIT,
+ elevenlabs_api_key: typing.Optional[str] = OMIT,
+ elevenlabs_voice_id: typing.Optional[str] = OMIT,
+ elevenlabs_model: typing.Optional[str] = OMIT,
+ elevenlabs_stability: typing.Optional[float] = OMIT,
+ elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
+ elevenlabs_style: typing.Optional[float] = OMIT,
+ elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
+ azure_voice_name: typing.Optional[str] = OMIT,
+ openai_voice_name: typing.Optional[CreateStreamRequestOpenaiVoiceName] = OMIT,
+ openai_tts_model: typing.Optional[CreateStreamRequestOpenaiTtsModel] = OMIT,
+ input_face: typing.Optional[str] = OMIT,
+ face_padding_top: typing.Optional[int] = OMIT,
+ face_padding_bottom: typing.Optional[int] = OMIT,
+ face_padding_left: typing.Optional[int] = OMIT,
+ face_padding_right: typing.Optional[int] = OMIT,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+ input_text: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateStreamResponse:
+ """
+ Parameters
+ ----------
+ integration_id : str
+ Your Integration ID as shown in the Copilot Integrations tab
+
+ conversation_id : typing.Optional[str]
+ The gooey conversation ID.
+
+ If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests.
+
+ Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response.
+
+ user_id : typing.Optional[str]
+ Your app's custom user ID.
+
+ If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation.
+
+ user_message_id : typing.Optional[str]
+ Your app's custom message ID for the user message.
+
+ If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation.
+
+ button_pressed : typing.Optional[ButtonPressed]
+ The button that was pressed by the user.
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ input_audio : typing.Optional[str]
+
+ input_images : typing.Optional[typing.Sequence[str]]
+
+ input_documents : typing.Optional[typing.Sequence[str]]
+
+ doc_extract_url : typing.Optional[str]
+ Select a workflow to extract text from documents and images.
+
+ messages : typing.Optional[typing.Sequence[ConversationEntry]]
+
+ bot_script : typing.Optional[str]
+
+ selected_model : typing.Optional[CreateStreamRequestSelectedModel]
+
+ document_model : typing.Optional[str]
+ When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ task_instructions : typing.Optional[str]
+
+ query_instructions : typing.Optional[str]
+
+ keyword_instructions : typing.Optional[str]
+
+ documents : typing.Optional[typing.Sequence[str]]
+
+ max_references : typing.Optional[int]
+
+ max_context_words : typing.Optional[int]
+
+ scroll_jump : typing.Optional[int]
+
+ embedding_model : typing.Optional[CreateStreamRequestEmbeddingModel]
+
+ dense_weight : typing.Optional[float]
+
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+
+ citation_style : typing.Optional[CreateStreamRequestCitationStyle]
+
+ use_url_shortener : typing.Optional[bool]
+
+ asr_model : typing.Optional[CreateStreamRequestAsrModel]
+ Choose a model to transcribe incoming audio messages to text.
+
+ asr_language : typing.Optional[str]
+ Choose a language to transcribe incoming audio messages to text.
+
+ translation_model : typing.Optional[CreateStreamRequestTranslationModel]
+
+ user_language : typing.Optional[str]
+ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+
+ input_glossary_document : typing.Optional[str]
+
+ Translation Glossary for User Langauge -> LLM Language (English)
+
+
+ output_glossary_document : typing.Optional[str]
+
+ Translation Glossary for LLM Language (English) -> User Langauge
+
+
+ lipsync_model : typing.Optional[CreateStreamRequestLipsyncModel]
+
+ tools : typing.Optional[typing.Sequence[LlmTools]]
+ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+
+ tts_provider : typing.Optional[CreateStreamRequestTtsProvider]
+
+ uberduck_voice_name : typing.Optional[str]
+
+ uberduck_speaking_rate : typing.Optional[float]
+
+ google_voice_name : typing.Optional[str]
+
+ google_speaking_rate : typing.Optional[float]
+
+ google_pitch : typing.Optional[float]
+
+ bark_history_prompt : typing.Optional[str]
+
+ elevenlabs_voice_name : typing.Optional[str]
+ Use `elevenlabs_voice_id` instead
+
+ elevenlabs_api_key : typing.Optional[str]
+
+ elevenlabs_voice_id : typing.Optional[str]
+
+ elevenlabs_model : typing.Optional[str]
+
+ elevenlabs_stability : typing.Optional[float]
+
+ elevenlabs_similarity_boost : typing.Optional[float]
+
+ elevenlabs_style : typing.Optional[float]
+
+ elevenlabs_speaker_boost : typing.Optional[bool]
+
+ azure_voice_name : typing.Optional[str]
+
+ openai_voice_name : typing.Optional[CreateStreamRequestOpenaiVoiceName]
+
+ openai_tts_model : typing.Optional[CreateStreamRequestOpenaiTtsModel]
+
+ input_face : typing.Optional[str]
+
+ face_padding_top : typing.Optional[int]
+
+ face_padding_bottom : typing.Optional[int]
+
+ face_padding_left : typing.Optional[int]
+
+ face_padding_right : typing.Optional[int]
+
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+ input_text : typing.Optional[str]
+ Use `input_prompt` instead
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateStreamResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.copilot_integrations.video_bots_stream_create(
+ integration_id="integration_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/integrations/stream/",
+ method="POST",
+ json={
+ "integration_id": integration_id,
+ "conversation_id": conversation_id,
+ "user_id": user_id,
+ "user_message_id": user_message_id,
+ "button_pressed": button_pressed,
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "input_audio": input_audio,
+ "input_images": input_images,
+ "input_documents": input_documents,
+ "doc_extract_url": doc_extract_url,
+ "messages": messages,
+ "bot_script": bot_script,
+ "selected_model": selected_model,
+ "document_model": document_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "task_instructions": task_instructions,
+ "query_instructions": query_instructions,
+ "keyword_instructions": keyword_instructions,
+ "documents": documents,
+ "max_references": max_references,
+ "max_context_words": max_context_words,
+ "scroll_jump": scroll_jump,
+ "embedding_model": embedding_model,
+ "dense_weight": dense_weight,
+ "citation_style": citation_style,
+ "use_url_shortener": use_url_shortener,
+ "asr_model": asr_model,
+ "asr_language": asr_language,
+ "translation_model": translation_model,
+ "user_language": user_language,
+ "input_glossary_document": input_glossary_document,
+ "output_glossary_document": output_glossary_document,
+ "lipsync_model": lipsync_model,
+ "tools": tools,
+ "tts_provider": tts_provider,
+ "uberduck_voice_name": uberduck_voice_name,
+ "uberduck_speaking_rate": uberduck_speaking_rate,
+ "google_voice_name": google_voice_name,
+ "google_speaking_rate": google_speaking_rate,
+ "google_pitch": google_pitch,
+ "bark_history_prompt": bark_history_prompt,
+ "elevenlabs_voice_name": elevenlabs_voice_name,
+ "elevenlabs_api_key": elevenlabs_api_key,
+ "elevenlabs_voice_id": elevenlabs_voice_id,
+ "elevenlabs_model": elevenlabs_model,
+ "elevenlabs_stability": elevenlabs_stability,
+ "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+ "elevenlabs_style": elevenlabs_style,
+ "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+ "azure_voice_name": azure_voice_name,
+ "openai_voice_name": openai_voice_name,
+ "openai_tts_model": openai_tts_model,
+ "input_face": input_face,
+ "face_padding_top": face_padding_top,
+ "face_padding_bottom": face_padding_bottom,
+ "face_padding_left": face_padding_left,
+ "face_padding_right": face_padding_right,
+ "sadtalker_settings": sadtalker_settings,
+ "input_text": input_text,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(CreateStreamResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def video_bots_stream(
+ self, request_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> VideoBotsStreamResponse:
+ """
+ Parameters
+ ----------
+ request_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ VideoBotsStreamResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.copilot_integrations.video_bots_stream(
+ request_id="request_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v3/integrations/stream/{jsonable_encoder(request_id)}/", method="GET", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(VideoBotsStreamResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
class AsyncCopilotIntegrationsClient:
    """Async client for the Copilot Integrations endpoints (``v3/integrations/stream``).

    Mirrors the synchronous client with ``async``/``await`` semantics; all requests
    go through the wrapped async httpx client supplied by ``client_wrapper``.
    """

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # The wrapper supplies the base URL, default headers and shared httpx client.
        self._client_wrapper = client_wrapper

    async def video_bots_stream_create(
        self,
        *,
        integration_id: str,
        conversation_id: typing.Optional[str] = OMIT,
        user_id: typing.Optional[str] = OMIT,
        user_message_id: typing.Optional[str] = OMIT,
        button_pressed: typing.Optional[ButtonPressed] = OMIT,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        input_audio: typing.Optional[str] = OMIT,
        input_images: typing.Optional[typing.Sequence[str]] = OMIT,
        input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
        doc_extract_url: typing.Optional[str] = OMIT,
        messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
        bot_script: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[CreateStreamRequestSelectedModel] = OMIT,
        document_model: typing.Optional[str] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        keyword_instructions: typing.Optional[str] = OMIT,
        documents: typing.Optional[typing.Sequence[str]] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        citation_style: typing.Optional[CreateStreamRequestCitationStyle] = OMIT,
        use_url_shortener: typing.Optional[bool] = OMIT,
        asr_model: typing.Optional[CreateStreamRequestAsrModel] = OMIT,
        asr_language: typing.Optional[str] = OMIT,
        translation_model: typing.Optional[CreateStreamRequestTranslationModel] = OMIT,
        user_language: typing.Optional[str] = OMIT,
        input_glossary_document: typing.Optional[str] = OMIT,
        output_glossary_document: typing.Optional[str] = OMIT,
        lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = OMIT,
        tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
        tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = OMIT,
        uberduck_voice_name: typing.Optional[str] = OMIT,
        uberduck_speaking_rate: typing.Optional[float] = OMIT,
        google_voice_name: typing.Optional[str] = OMIT,
        google_speaking_rate: typing.Optional[float] = OMIT,
        google_pitch: typing.Optional[float] = OMIT,
        bark_history_prompt: typing.Optional[str] = OMIT,
        elevenlabs_voice_name: typing.Optional[str] = OMIT,
        elevenlabs_api_key: typing.Optional[str] = OMIT,
        elevenlabs_voice_id: typing.Optional[str] = OMIT,
        elevenlabs_model: typing.Optional[str] = OMIT,
        elevenlabs_stability: typing.Optional[float] = OMIT,
        elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
        elevenlabs_style: typing.Optional[float] = OMIT,
        elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
        azure_voice_name: typing.Optional[str] = OMIT,
        openai_voice_name: typing.Optional[CreateStreamRequestOpenaiVoiceName] = OMIT,
        openai_tts_model: typing.Optional[CreateStreamRequestOpenaiTtsModel] = OMIT,
        input_face: typing.Optional[str] = OMIT,
        face_padding_top: typing.Optional[int] = OMIT,
        face_padding_bottom: typing.Optional[int] = OMIT,
        face_padding_left: typing.Optional[int] = OMIT,
        face_padding_right: typing.Optional[int] = OMIT,
        sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
        input_text: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> CreateStreamResponse:
        """
        Start (or continue) a copilot conversation stream.

        Parameters
        ----------
        integration_id : str
            Your Integration ID as shown in the Copilot Integrations tab

        conversation_id : typing.Optional[str]
            The gooey conversation ID.

            If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests.

            Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response.

        user_id : typing.Optional[str]
            Your app's custom user ID.

            If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation.

        user_message_id : typing.Optional[str]
            Your app's custom message ID for the user message.

            If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation.

        button_pressed : typing.Optional[ButtonPressed]
            The button that was pressed by the user.

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        input_prompt : typing.Optional[str]

        input_audio : typing.Optional[str]

        input_images : typing.Optional[typing.Sequence[str]]

        input_documents : typing.Optional[typing.Sequence[str]]

        doc_extract_url : typing.Optional[str]
            Select a workflow to extract text from documents and images.

        messages : typing.Optional[typing.Sequence[ConversationEntry]]

        bot_script : typing.Optional[str]

        selected_model : typing.Optional[CreateStreamRequestSelectedModel]

        document_model : typing.Optional[str]
            When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        keyword_instructions : typing.Optional[str]

        documents : typing.Optional[typing.Sequence[str]]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[CreateStreamRequestEmbeddingModel]

        dense_weight : typing.Optional[float]

            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.


        citation_style : typing.Optional[CreateStreamRequestCitationStyle]

        use_url_shortener : typing.Optional[bool]

        asr_model : typing.Optional[CreateStreamRequestAsrModel]
            Choose a model to transcribe incoming audio messages to text.

        asr_language : typing.Optional[str]
            Choose a language to transcribe incoming audio messages to text.

        translation_model : typing.Optional[CreateStreamRequestTranslationModel]

        user_language : typing.Optional[str]
            Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.

        input_glossary_document : typing.Optional[str]

            Translation Glossary for User Language -> LLM Language (English)


        output_glossary_document : typing.Optional[str]

            Translation Glossary for LLM Language (English) -> User Language


        lipsync_model : typing.Optional[CreateStreamRequestLipsyncModel]

        tools : typing.Optional[typing.Sequence[LlmTools]]
            Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).

        tts_provider : typing.Optional[CreateStreamRequestTtsProvider]

        uberduck_voice_name : typing.Optional[str]

        uberduck_speaking_rate : typing.Optional[float]

        google_voice_name : typing.Optional[str]

        google_speaking_rate : typing.Optional[float]

        google_pitch : typing.Optional[float]

        bark_history_prompt : typing.Optional[str]

        elevenlabs_voice_name : typing.Optional[str]
            Use `elevenlabs_voice_id` instead

        elevenlabs_api_key : typing.Optional[str]

        elevenlabs_voice_id : typing.Optional[str]

        elevenlabs_model : typing.Optional[str]

        elevenlabs_stability : typing.Optional[float]

        elevenlabs_similarity_boost : typing.Optional[float]

        elevenlabs_style : typing.Optional[float]

        elevenlabs_speaker_boost : typing.Optional[bool]

        azure_voice_name : typing.Optional[str]

        openai_voice_name : typing.Optional[CreateStreamRequestOpenaiVoiceName]

        openai_tts_model : typing.Optional[CreateStreamRequestOpenaiTtsModel]

        input_face : typing.Optional[str]

        face_padding_top : typing.Optional[int]

        face_padding_bottom : typing.Optional[int]

        face_padding_left : typing.Optional[int]

        face_padding_right : typing.Optional[int]

        sadtalker_settings : typing.Optional[SadTalkerSettings]

        input_text : typing.Optional[str]
            Use `input_prompt` instead

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CreateStreamResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.copilot_integrations.video_bots_stream_create(
                integration_id="integration_id",
            )


        asyncio.run(main())
        """
        # OMIT-valued fields are stripped from the JSON body by the http client (omit=OMIT).
        _response = await self._client_wrapper.httpx_client.request(
            "v3/integrations/stream/",
            method="POST",
            json={
                "integration_id": integration_id,
                "conversation_id": conversation_id,
                "user_id": user_id,
                "user_message_id": user_message_id,
                "button_pressed": button_pressed,
                "functions": functions,
                "variables": variables,
                "input_prompt": input_prompt,
                "input_audio": input_audio,
                "input_images": input_images,
                "input_documents": input_documents,
                "doc_extract_url": doc_extract_url,
                "messages": messages,
                "bot_script": bot_script,
                "selected_model": selected_model,
                "document_model": document_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "keyword_instructions": keyword_instructions,
                "documents": documents,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "citation_style": citation_style,
                "use_url_shortener": use_url_shortener,
                "asr_model": asr_model,
                "asr_language": asr_language,
                "translation_model": translation_model,
                "user_language": user_language,
                "input_glossary_document": input_glossary_document,
                "output_glossary_document": output_glossary_document,
                "lipsync_model": lipsync_model,
                "tools": tools,
                "tts_provider": tts_provider,
                "uberduck_voice_name": uberduck_voice_name,
                "uberduck_speaking_rate": uberduck_speaking_rate,
                "google_voice_name": google_voice_name,
                "google_speaking_rate": google_speaking_rate,
                "google_pitch": google_pitch,
                "bark_history_prompt": bark_history_prompt,
                "elevenlabs_voice_name": elevenlabs_voice_name,
                "elevenlabs_api_key": elevenlabs_api_key,
                "elevenlabs_voice_id": elevenlabs_voice_id,
                "elevenlabs_model": elevenlabs_model,
                "elevenlabs_stability": elevenlabs_stability,
                "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
                "elevenlabs_style": elevenlabs_style,
                "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
                "azure_voice_name": azure_voice_name,
                "openai_voice_name": openai_voice_name,
                "openai_tts_model": openai_tts_model,
                "input_face": input_face,
                "face_padding_top": face_padding_top,
                "face_padding_bottom": face_padding_bottom,
                "face_padding_left": face_padding_left,
                "face_padding_right": face_padding_right,
                "sadtalker_settings": sadtalker_settings,
                "input_text": input_text,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(CreateStreamResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON error payload: report the raw response text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def video_bots_stream(
        self, request_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> VideoBotsStreamResponse:
        """
        Fetch the next event for a previously created copilot stream.

        Parameters
        ----------
        request_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        VideoBotsStreamResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.copilot_integrations.video_bots_stream(
                request_id="request_id",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            f"v3/integrations/stream/{jsonable_encoder(request_id)}/", method="GET", request_options=request_options
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(VideoBotsStreamResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON error payload: report the raw response text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/copilot_integrations/types/__init__.py b/src/gooey/copilot_integrations/types/__init__.py
new file mode 100644
index 0000000..4e7d806
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/__init__.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .create_stream_request_asr_model import CreateStreamRequestAsrModel
+from .create_stream_request_citation_style import CreateStreamRequestCitationStyle
+from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel
+from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel
+from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel
+from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName
+from .create_stream_request_selected_model import CreateStreamRequestSelectedModel
+from .create_stream_request_translation_model import CreateStreamRequestTranslationModel
+from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider
+from .video_bots_stream_response import VideoBotsStreamResponse
+
+__all__ = [
+ "CreateStreamRequestAsrModel",
+ "CreateStreamRequestCitationStyle",
+ "CreateStreamRequestEmbeddingModel",
+ "CreateStreamRequestLipsyncModel",
+ "CreateStreamRequestOpenaiTtsModel",
+ "CreateStreamRequestOpenaiVoiceName",
+ "CreateStreamRequestSelectedModel",
+ "CreateStreamRequestTranslationModel",
+ "CreateStreamRequestTtsProvider",
+ "VideoBotsStreamResponse",
+]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py b/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py
new file mode 100644
index 0000000..c6d4550
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Identifier of the speech-recognition model used to transcribe incoming audio.
# The trailing `typing.Any` makes the union accept values outside the literal set,
# so newer server-side model names do not break validation.
CreateStreamRequestAsrModel = typing.Union[
    typing.Literal[
        "whisper_large_v2",
        "whisper_large_v3",
        "whisper_hindi_large_v2",
        "whisper_telugu_large_v2",
        "nemo_english",
        "nemo_hindi",
        "vakyansh_bhojpuri",
        "gcp_v1",
        "usm",
        "deepgram",
        "azure",
        "seamless_m4t",
        "mms_1b_all",
    ],
    typing.Any,
]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_citation_style.py b/src/gooey/copilot_integrations/types/create_stream_request_citation_style.py
new file mode 100644
index 0000000..e57bab1
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/create_stream_request_citation_style.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# How citations are rendered in the copilot's responses (numbering/symbols plus
# an output markup flavor). `typing.Any` keeps the union open to newer values.
CreateStreamRequestCitationStyle = typing.Union[
    typing.Literal[
        "number",
        "title",
        "url",
        "symbol",
        "markdown",
        "html",
        "slack_mrkdwn",
        "plaintext",
        "number_markdown",
        "number_html",
        "number_slack_mrkdwn",
        "number_plaintext",
        "symbol_markdown",
        "symbol_html",
        "symbol_slack_mrkdwn",
        "symbol_plaintext",
    ],
    typing.Any,
]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py b/src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py
new file mode 100644
index 0000000..cef26bf
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Embedding model used for document retrieval. `typing.Any` keeps the union
# open so newer model names do not break validation.
CreateStreamRequestEmbeddingModel = typing.Union[
    typing.Literal[
        "openai_3_large",
        "openai_3_small",
        "openai_ada_2",
        "e5_large_v2",
        "e5_base_v2",
        "multilingual_e5_base",
        "multilingual_e5_large",
        "gte_large",
        "gte_base",
    ],
    typing.Any,
]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py b/src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py
new file mode 100644
index 0000000..c207d45
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Lipsync engine for generated video. `typing.Any` keeps the union open to newer values.
CreateStreamRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py b/src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py
new file mode 100644
index 0000000..475ca67
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# OpenAI text-to-speech model. `typing.Any` keeps the union open to newer values.
CreateStreamRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py b/src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py
new file mode 100644
index 0000000..4f3dd7a
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# OpenAI TTS voice preset. `typing.Any` keeps the union open to newer values.
CreateStreamRequestOpenaiVoiceName = typing.Union[
    typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py b/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py
new file mode 100644
index 0000000..765029f
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Large language model backing the copilot. `typing.Any` keeps the union
# open so newly released model names do not break validation.
CreateStreamRequestSelectedModel = typing.Union[
    typing.Literal[
        "gpt_4_o",
        "gpt_4_turbo_vision",
        "gpt_4_vision",
        "gpt_4_turbo",
        "gpt_4",
        "gpt_4_32k",
        "gpt_3_5_turbo",
        "gpt_3_5_turbo_16k",
        "gpt_3_5_turbo_instruct",
        "llama3_70b",
        "llama3_8b",
        "llama2_70b_chat",
        "mixtral_8x7b_instruct_0_1",
        "gemma_7b_it",
        "gemini_1_5_pro",
        "gemini_1_pro_vision",
        "gemini_1_pro",
        "palm2_chat",
        "palm2_text",
        "claude_3_5_sonnet",
        "claude_3_opus",
        "claude_3_sonnet",
        "claude_3_haiku",
        "sea_lion_7b_instruct",
        "text_davinci_003",
        "text_davinci_002",
        "code_davinci_002",
        "text_curie_001",
        "text_babbage_001",
        "text_ada_001",
    ],
    typing.Any,
]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_translation_model.py b/src/gooey/copilot_integrations/types/create_stream_request_translation_model.py
new file mode 100644
index 0000000..3876937
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/create_stream_request_translation_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Translation backend for user-language <-> English translation. `typing.Any` keeps the union open.
CreateStreamRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py b/src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py
new file mode 100644
index 0000000..cad602d
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Text-to-speech provider for voice responses. `typing.Any` keeps the union open to newer values.
CreateStreamRequestTtsProvider = typing.Union[
    typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
]
diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_response.py b/src/gooey/copilot_integrations/types/video_bots_stream_response.py
new file mode 100644
index 0000000..a8f1ad1
--- /dev/null
+++ b/src/gooey/copilot_integrations/types/video_bots_stream_response.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...types.conversation_start import ConversationStart
+from ...types.final_response import FinalResponse
+from ...types.message_part import MessagePart
+from ...types.run_start import RunStart
+from ...types.stream_error import StreamError
+
# One event from the copilot stream: conversation/run lifecycle markers,
# an incremental message part, the final response, or a stream error.
VideoBotsStreamResponse = typing.Union[ConversationStart, RunStart, MessagePart, FinalResponse, StreamError]
diff --git a/src/gooey/core/__init__.py b/src/gooey/core/__init__.py
new file mode 100644
index 0000000..58ad52a
--- /dev/null
+++ b/src/gooey/core/__init__.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .api_error import ApiError
+from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper
+from .datetime_utils import serialize_datetime
+from .file import File, convert_file_dict_to_httpx_tuples
+from .http_client import AsyncHttpClient, HttpClient
+from .jsonable_encoder import jsonable_encoder
+from .pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .query_encoder import encode_query
+from .remove_none_from_dict import remove_none_from_dict
+from .request_options import RequestOptions
+
+__all__ = [
+ "ApiError",
+ "AsyncClientWrapper",
+ "AsyncHttpClient",
+ "BaseClientWrapper",
+ "File",
+ "HttpClient",
+ "RequestOptions",
+ "SyncClientWrapper",
+ "convert_file_dict_to_httpx_tuples",
+ "deep_union_pydantic_dicts",
+ "encode_query",
+ "jsonable_encoder",
+ "pydantic_v1",
+ "remove_none_from_dict",
+ "serialize_datetime",
+]
diff --git a/src/gooey/core/api_error.py b/src/gooey/core/api_error.py
new file mode 100644
index 0000000..2e9fc54
--- /dev/null
+++ b/src/gooey/core/api_error.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+
class ApiError(Exception):
    """Error raised when the API returns an unexpected or failing response.

    Attributes
    ----------
    status_code : typing.Optional[int]
        HTTP status code of the failed response, when one was received.
    body : typing.Any
        Decoded response body (or raw text if decoding failed).
    """

    status_code: typing.Optional[int]
    body: typing.Any

    def __init__(self, *, status_code: typing.Optional[int] = None, body: typing.Any = None):
        super().__init__()
        self.status_code = status_code
        self.body = body

    def __str__(self) -> str:
        # Keep the established message format; callers may log or match on it.
        return "status_code: {}, body: {}".format(self.status_code, self.body)
diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py
new file mode 100644
index 0000000..618f7f9
--- /dev/null
+++ b/src/gooey/core/client_wrapper.py
@@ -0,0 +1,68 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import httpx
+
+from .http_client import AsyncHttpClient, HttpClient
+
+
class BaseClientWrapper:
    """Shared connection settings (auth, base URL, timeout) for the sync and async wrappers."""

    def __init__(
        self, *, authorization: typing.Optional[str] = None, base_url: str, timeout: typing.Optional[float] = None
    ):
        self._authorization = authorization
        self._base_url = base_url
        self._timeout = timeout

    def get_headers(self) -> typing.Dict[str, str]:
        """Build the default headers attached to every request."""
        header_map: typing.Dict[str, str] = {
            "X-Fern-Language": "Python",
            "X-Fern-SDK-Name": "gooey",
            "X-Fern-SDK-Version": "0.0.0",
        }
        auth = self._authorization
        if auth is not None:
            # Authorization is only sent when a credential was configured.
            header_map["Authorization"] = auth
        return header_map

    def get_base_url(self) -> str:
        """Return the configured API base URL."""
        return self._base_url

    def get_timeout(self) -> typing.Optional[float]:
        """Return the configured request timeout in seconds, if any."""
        return self._timeout
+
+
class SyncClientWrapper(BaseClientWrapper):
    """Synchronous wrapper: couples the shared settings with an ``httpx.Client``."""

    def __init__(
        self,
        *,
        authorization: typing.Optional[str] = None,
        base_url: str,
        timeout: typing.Optional[float] = None,
        httpx_client: httpx.Client
    ):
        super().__init__(authorization=authorization, base_url=base_url, timeout=timeout)
        # Wrap the raw httpx client so every request inherits the base URL,
        # default headers and timeout configured on this wrapper.
        self.httpx_client = HttpClient(
            httpx_client=httpx_client,
            base_headers=self.get_headers(),
            base_timeout=self.get_timeout(),
            base_url=self.get_base_url(),
        )
+
+
class AsyncClientWrapper(BaseClientWrapper):
    """Asynchronous wrapper: couples the shared settings with an ``httpx.AsyncClient``."""

    def __init__(
        self,
        *,
        authorization: typing.Optional[str] = None,
        base_url: str,
        timeout: typing.Optional[float] = None,
        httpx_client: httpx.AsyncClient
    ):
        super().__init__(authorization=authorization, base_url=base_url, timeout=timeout)
        # Wrap the raw async httpx client so every request inherits the base URL,
        # default headers and timeout configured on this wrapper.
        self.httpx_client = AsyncHttpClient(
            httpx_client=httpx_client,
            base_headers=self.get_headers(),
            base_timeout=self.get_timeout(),
            base_url=self.get_base_url(),
        )
diff --git a/src/gooey/core/datetime_utils.py b/src/gooey/core/datetime_utils.py
new file mode 100644
index 0000000..7c9864a
--- /dev/null
+++ b/src/gooey/core/datetime_utils.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+
+
def serialize_datetime(v: dt.datetime) -> str:
    """
    Serialize a datetime including timezone info.

    Uses the timezone info provided if present, otherwise uses the current runtime's timezone info.

    UTC datetimes end in "Z" while all other timezones are represented as offset from UTC, e.g. +05:00.
    """
    if v.tzinfo is None:
        # Naive datetime: attach the runtime's local timezone before serializing.
        v = v.replace(tzinfo=dt.datetime.now().astimezone().tzinfo)
    if v.tzinfo is not None and v.tzinfo.tzname(None) == dt.timezone.utc.tzname(None):
        # UTC is a special case where "Z" replaces the "+00:00" suffix.
        return v.isoformat().replace("+00:00", "Z")
    # Any other zone keeps the standard +/-HH:MM offset format.
    return v.isoformat()
diff --git a/src/gooey/core/file.py b/src/gooey/core/file.py
new file mode 100644
index 0000000..cb0d40b
--- /dev/null
+++ b/src/gooey/core/file.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# File typing inspired by the flexibility of types within the httpx library
# https://github.com/encode/httpx/blob/master/httpx/_types.py
FileContent = typing.Union[typing.IO[bytes], bytes, str]
File = typing.Union[
    # file (or bytes)
    FileContent,
    # (filename, file (or bytes))
    typing.Tuple[typing.Optional[str], FileContent],
    # (filename, file (or bytes), content_type)
    typing.Tuple[typing.Optional[str], FileContent, typing.Optional[str]],
    # (filename, file (or bytes), content_type, headers)
    typing.Tuple[typing.Optional[str], FileContent, typing.Optional[str], typing.Mapping[str, str]],
]


def convert_file_dict_to_httpx_tuples(
    d: typing.Dict[str, typing.Union[File, typing.List[File]]]
) -> typing.List[typing.Tuple[str, File]]:
    """
    Flatten a name -> file mapping into the (name, file) tuple list httpx expects.

    HTTPX normally takes a dict of files, but sending several files under the
    same field name requires the list-of-tuples form, which also works for the
    single-file case: https://github.com/encode/httpx/pull/1032
    """
    httpx_tuples: typing.List[typing.Tuple[str, File]] = []
    for field_name, file_or_files in d.items():
        as_list = file_or_files if isinstance(file_or_files, list) else [file_or_files]
        httpx_tuples.extend((field_name, single_file) for single_file in as_list)
    return httpx_tuples
diff --git a/src/gooey/core/http_client.py b/src/gooey/core/http_client.py
new file mode 100644
index 0000000..9333d8a
--- /dev/null
+++ b/src/gooey/core/http_client.py
@@ -0,0 +1,475 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import asyncio
+import email.utils
+import json
+import re
+import time
+import typing
+import urllib.parse
+from contextlib import asynccontextmanager, contextmanager
+from random import random
+
+import httpx
+
+from .file import File, convert_file_dict_to_httpx_tuples
+from .jsonable_encoder import jsonable_encoder
+from .query_encoder import encode_query
+from .remove_none_from_dict import remove_none_from_dict
+from .request_options import RequestOptions
+
# Exponential-backoff schedule for automatic retries (seconds).
INITIAL_RETRY_DELAY_SECONDS = 0.5
MAX_RETRY_DELAY_SECONDS = 10
# Largest server-provided Retry-After value we are willing to honor.
MAX_RETRY_DELAY_SECONDS_FROM_HEADER = 30
+
+
def _parse_retry_after(response_headers: httpx.Headers) -> typing.Optional[float]:
    """
    Parse the `Retry-After-Ms` / `Retry-After` response headers and return the
    number of seconds to wait, or None when neither header is usable.

    Inspired by the urllib3 retry implementation.
    """
    retry_after_ms = response_headers.get("retry-after-ms")
    if retry_after_ms is not None:
        try:
            # Parse first, then compare: the header value is a string, so
            # comparing it directly against an int would raise TypeError and
            # the header would always be silently ignored.
            ms = int(retry_after_ms)
            return ms / 1000 if ms > 0 else 0
        except Exception:
            pass

    retry_after = response_headers.get("retry-after")
    if retry_after is None:
        return None

    # Attempt to parse the header as an integer number of seconds.
    if re.match(r"^\s*[0-9]+\s*$", retry_after):
        seconds = float(retry_after)
    # Fallback to parsing it as an HTTP date.
    else:
        retry_date_tuple = email.utils.parsedate_tz(retry_after)
        if retry_date_tuple is None:
            return None
        if retry_date_tuple[9] is None:  # Python 2
            # Assume UTC if no timezone was specified
            # On Python2.7, parsedate_tz returns None for a timezone offset
            # instead of 0 if no timezone is given, where mktime_tz treats
            # a None timezone offset as local time.
            retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]

        retry_date = email.utils.mktime_tz(retry_date_tuple)
        seconds = retry_date - time.time()

    if seconds < 0:
        seconds = 0

    return seconds
+
+
def _retry_timeout(response: httpx.Response, retries: int) -> float:
    """
    Compute the delay in seconds to wait before retrying a request.

    A server-provided Retry-After value wins when it is reasonable; otherwise
    we fall back to capped exponential backoff with downward jitter.
    """
    server_hint = _parse_retry_after(response.headers)
    if server_hint is not None and server_hint <= MAX_RETRY_DELAY_SECONDS_FROM_HEADER:
        return server_hint

    # Exponential backoff capped at MAX_RETRY_DELAY_SECONDS, then jittered
    # downward by up to 25% so concurrent clients do not retry in lockstep.
    backoff = min(INITIAL_RETRY_DELAY_SECONDS * (2.0 ** retries), MAX_RETRY_DELAY_SECONDS)
    jittered = backoff * (1 - 0.25 * random())
    return jittered if jittered >= 0 else 0
+
+
def _should_retry(response: httpx.Response) -> bool:
    # Retry on every 5xx plus the retriable 4xx codes:
    # 408 (request timeout), 409 (conflict) and 429 (rate limited).
    return response.status_code >= 500 or response.status_code in (429, 408, 409)
+
+
def remove_omit_from_dict(
    original: typing.Dict[str, typing.Optional[typing.Any]], omit: typing.Optional[typing.Any]
) -> typing.Dict[str, typing.Any]:
    """
    Return ``original`` without the entries whose value *is* the ``omit``
    sentinel (identity comparison). When no sentinel is given, the original
    mapping is returned unchanged.
    """
    if omit is None:
        return original
    return {key: value for key, value in original.items() if value is not omit}
+
+
def maybe_filter_request_body(
    data: typing.Optional[typing.Any],
    request_options: typing.Optional[RequestOptions],
    omit: typing.Optional[typing.Any],
) -> typing.Optional[typing.Any]:
    """
    Prepare a request body: strip omitted entries, JSON-encode it, and merge in
    any ``additional_body_parameters`` from the request options.
    """
    extra_body = (
        jsonable_encoder(request_options.get("additional_body_parameters", {})) or {}
        if request_options is not None
        else None
    )
    if data is None:
        # No caller-supplied body: send only the extra parameters (or nothing).
        return extra_body
    if not isinstance(data, typing.Mapping):
        # Non-mapping bodies are encoded as-is; extras cannot be merged in.
        return jsonable_encoder(data)
    return {
        **(jsonable_encoder(remove_omit_from_dict(data, omit))),  # type: ignore
        **(extra_body or {}),
    }
+
+
+# Abstracted out for testing purposes
+def get_request_body(
+ *,
+ json: typing.Optional[typing.Any],
+ data: typing.Optional[typing.Any],
+ request_options: typing.Optional[RequestOptions],
+ omit: typing.Optional[typing.Any],
+) -> typing.Tuple[typing.Optional[typing.Any], typing.Optional[typing.Any]]:
+ json_body = None
+ data_body = None
+ if data is not None:
+ data_body = maybe_filter_request_body(data, request_options, omit)
+ else:
+ # If both data and json are None, we send json data in the event extra properties are specified
+ json_body = maybe_filter_request_body(json, request_options, omit)
+
+ return json_body, data_body
+
+
class HttpClient:
    """
    Synchronous HTTP client that layers base configuration, per-request
    options and automatic retries on top of an ``httpx.Client``.
    """

    def __init__(
        self,
        *,
        httpx_client: httpx.Client,
        base_timeout: typing.Optional[float],
        base_headers: typing.Dict[str, str],
        base_url: typing.Optional[str] = None,
    ):
        self.base_url = base_url
        self.base_timeout = base_timeout
        self.base_headers = base_headers
        self.httpx_client = httpx_client

    def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
        """Return the per-request base URL if given, else the client-wide one; raise when neither is set."""
        base_url = self.base_url if maybe_base_url is None else maybe_base_url
        if base_url is None:
            raise ValueError("A base_url is required to make this request, please provide one and try again.")
        return base_url

    def request(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None,
        files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional[RequestOptions] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
    ) -> httpx.Response:
        """
        Send a request, merging base and request-option headers, params and
        body, and retrying retriable failures up to ``max_retries`` times.
        """
        base_url = self.get_base_url(base_url)
        # A per-request timeout overrides the client-wide default.
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout
        )

        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)

        response = self.httpx_client.request(
            method=method,
            url=urllib.parse.urljoin(f"{base_url}/", path),
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
                        **self.base_headers,
                        **(headers if headers is not None else {}),
                        **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
                    }
                )
            ),
            params=encode_query(
                jsonable_encoder(
                    remove_none_from_dict(
                        remove_omit_from_dict(
                            {
                                **(params if params is not None else {}),
                                **(
                                    request_options.get("additional_query_parameters", {}) or {}
                                    if request_options is not None
                                    else {}
                                ),
                            },
                            omit,
                        )
                    )
                )
            ),
            json=json_body,
            data=data_body,
            content=content,
            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
            timeout=timeout,
        )

        max_retries: int = request_options.get("max_retries", 0) if request_options is not None else 0
        if _should_retry(response=response):
            if max_retries > retries:
                time.sleep(_retry_timeout(response=response, retries=retries))
                return self.request(
                    path=path,
                    method=method,
                    base_url=base_url,
                    params=params,
                    json=json,
                    # Forward the form body too; omitting it would retry the
                    # request without its data payload.
                    data=data,
                    content=content,
                    files=files,
                    headers=headers,
                    request_options=request_options,
                    retries=retries + 1,
                    omit=omit,
                )

        return response

    @contextmanager
    def stream(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None,
        files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional[RequestOptions] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
    ) -> typing.Iterator[httpx.Response]:
        """
        Context manager that streams a response. Headers/params/body are merged
        the same way as :meth:`request`; streamed requests are not retried.
        """
        base_url = self.get_base_url(base_url)
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout
        )

        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)

        with self.httpx_client.stream(
            method=method,
            url=urllib.parse.urljoin(f"{base_url}/", path),
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
                        **self.base_headers,
                        **(headers if headers is not None else {}),
                        # `or {}` guards an explicit None value, matching request().
                        **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
                    }
                )
            ),
            params=encode_query(
                jsonable_encoder(
                    remove_none_from_dict(
                        remove_omit_from_dict(
                            {
                                **(params if params is not None else {}),
                                **(
                                    request_options.get("additional_query_parameters", {}) or {}
                                    if request_options is not None
                                    else {}
                                ),
                            },
                            omit,
                        )
                    )
                )
            ),
            json=json_body,
            data=data_body,
            content=content,
            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
            timeout=timeout,
        ) as stream:
            yield stream
+
+
class AsyncHttpClient:
    """
    Asynchronous HTTP client that layers base configuration, per-request
    options and automatic retries on top of an ``httpx.AsyncClient``.
    """

    def __init__(
        self,
        *,
        httpx_client: httpx.AsyncClient,
        base_timeout: typing.Optional[float],
        base_headers: typing.Dict[str, str],
        base_url: typing.Optional[str] = None,
    ):
        self.base_url = base_url
        self.base_timeout = base_timeout
        self.base_headers = base_headers
        self.httpx_client = httpx_client

    def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
        """Return the per-request base URL if given, else the client-wide one; raise when neither is set."""
        base_url = self.base_url if maybe_base_url is None else maybe_base_url
        if base_url is None:
            raise ValueError("A base_url is required to make this request, please provide one and try again.")
        return base_url

    async def request(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None,
        files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional[RequestOptions] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
    ) -> httpx.Response:
        """
        Send a request, merging base and request-option headers, params and
        body, and retrying retriable failures up to ``max_retries`` times.
        """
        base_url = self.get_base_url(base_url)
        # A per-request timeout overrides the client-wide default.
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout
        )

        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)

        response = await self.httpx_client.request(
            method=method,
            url=urllib.parse.urljoin(f"{base_url}/", path),
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
                        **self.base_headers,
                        **(headers if headers is not None else {}),
                        **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
                    }
                )
            ),
            params=encode_query(
                jsonable_encoder(
                    remove_none_from_dict(
                        remove_omit_from_dict(
                            {
                                **(params if params is not None else {}),
                                **(
                                    request_options.get("additional_query_parameters", {}) or {}
                                    if request_options is not None
                                    else {}
                                ),
                            },
                            omit,
                        )
                    )
                )
            ),
            json=json_body,
            data=data_body,
            content=content,
            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
            timeout=timeout,
        )

        max_retries: int = request_options.get("max_retries", 0) if request_options is not None else 0
        if _should_retry(response=response):
            if max_retries > retries:
                await asyncio.sleep(_retry_timeout(response=response, retries=retries))
                return await self.request(
                    path=path,
                    method=method,
                    base_url=base_url,
                    params=params,
                    json=json,
                    # Forward the form body too; omitting it would retry the
                    # request without its data payload.
                    data=data,
                    content=content,
                    files=files,
                    headers=headers,
                    request_options=request_options,
                    retries=retries + 1,
                    omit=omit,
                )
        return response

    @asynccontextmanager
    async def stream(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None,
        files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional[RequestOptions] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
    ) -> typing.AsyncIterator[httpx.Response]:
        """
        Async context manager that streams a response. Headers/params/body are
        merged the same way as :meth:`request`; streamed requests are not retried.
        """
        base_url = self.get_base_url(base_url)
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout
        )

        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)

        async with self.httpx_client.stream(
            method=method,
            url=urllib.parse.urljoin(f"{base_url}/", path),
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
                        **self.base_headers,
                        **(headers if headers is not None else {}),
                        # `or {}` guards an explicit None value, matching request().
                        **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
                    }
                )
            ),
            params=encode_query(
                jsonable_encoder(
                    remove_none_from_dict(
                        remove_omit_from_dict(
                            {
                                **(params if params is not None else {}),
                                **(
                                    request_options.get("additional_query_parameters", {}) or {}
                                    if request_options is not None
                                    else {}
                                ),
                            },
                            omit,
                        )
                    )
                )
            ),
            json=json_body,
            data=data_body,
            content=content,
            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
            timeout=timeout,
        ) as stream:
            yield stream
diff --git a/src/gooey/core/jsonable_encoder.py b/src/gooey/core/jsonable_encoder.py
new file mode 100644
index 0000000..f09aaf6
--- /dev/null
+++ b/src/gooey/core/jsonable_encoder.py
@@ -0,0 +1,102 @@
+# This file was auto-generated by Fern from our API Definition.
+
+"""
+jsonable_encoder converts a Python object to a JSON-friendly dict
+(e.g. datetimes to strings, Pydantic models to dicts).
+
+Taken from FastAPI, and made a bit simpler
+https://github.com/tiangolo/fastapi/blob/master/fastapi/encoders.py
+"""
+
+import base64
+import dataclasses
+import datetime as dt
+from collections import defaultdict
+from enum import Enum
+from pathlib import PurePath
+from types import GeneratorType
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
+
+from .datetime_utils import serialize_datetime
+from .pydantic_utilities import pydantic_v1
+
+SetIntStr = Set[Union[int, str]]
+DictIntStrAny = Dict[Union[int, str], Any]
+
+
def generate_encoders_by_class_tuples(
    type_encoder_map: Dict[Any, Callable[[Any], Any]]
) -> Dict[Callable[[Any], Any], Tuple[Any, ...]]:
    """Invert a type->encoder map into encoder->(types, ...) so isinstance fallbacks can be checked per encoder."""
    inverted: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict(tuple)
    for cls, encoder_fn in type_encoder_map.items():
        inverted[encoder_fn] = inverted[encoder_fn] + (cls,)
    return inverted
+
+
+encoders_by_class_tuples = generate_encoders_by_class_tuples(pydantic_v1.json.ENCODERS_BY_TYPE)
+
+
def jsonable_encoder(obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None) -> Any:
    """
    Recursively convert ``obj`` into JSON-compatible data (dicts, lists, str,
    int, float, None).

    ``custom_encoder`` maps types to callables and takes precedence over the
    default pydantic encoders; it is consulted by exact type first, then by
    ``isinstance``.
    """
    custom_encoder = custom_encoder or {}
    if custom_encoder:
        if type(obj) in custom_encoder:
            return custom_encoder[type(obj)](obj)
        else:
            for encoder_type, encoder_instance in custom_encoder.items():
                if isinstance(obj, encoder_type):
                    return encoder_instance(obj)
    if isinstance(obj, pydantic_v1.BaseModel):
        # Copy before merging: updating the dict returned by getattr in place
        # would mutate the model class's shared ``json_encoders`` config and
        # leak this call's custom encoders into every later encoding.
        encoder = dict(getattr(obj.__config__, "json_encoders", {}))
        if custom_encoder:
            encoder.update(custom_encoder)
        obj_dict = obj.dict(by_alias=True)
        if "__root__" in obj_dict:
            obj_dict = obj_dict["__root__"]
        return jsonable_encoder(obj_dict, custom_encoder=encoder)
    if dataclasses.is_dataclass(obj):
        obj_dict = dataclasses.asdict(obj)
        return jsonable_encoder(obj_dict, custom_encoder=custom_encoder)
    if isinstance(obj, bytes):
        return base64.b64encode(obj).decode("utf-8")
    if isinstance(obj, Enum):
        return obj.value
    if isinstance(obj, PurePath):
        return str(obj)
    if isinstance(obj, (str, int, float, type(None))):
        return obj
    if isinstance(obj, dt.datetime):
        return serialize_datetime(obj)
    if isinstance(obj, dt.date):
        return str(obj)
    if isinstance(obj, dict):
        # Encode both keys and values recursively. (The previous
        # ``allowed_keys`` filter was dead code: it always contained every key.)
        return {
            jsonable_encoder(key, custom_encoder=custom_encoder): jsonable_encoder(value, custom_encoder=custom_encoder)
            for key, value in obj.items()
        }
    if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
        return [jsonable_encoder(item, custom_encoder=custom_encoder) for item in obj]

    if type(obj) in pydantic_v1.json.ENCODERS_BY_TYPE:
        return pydantic_v1.json.ENCODERS_BY_TYPE[type(obj)](obj)
    for encoder, classes_tuple in encoders_by_class_tuples.items():
        if isinstance(obj, classes_tuple):
            return encoder(obj)

    # Last resort: coerce arbitrary objects via dict() or vars(); if both fail,
    # surface the accumulated errors to the caller.
    try:
        data = dict(obj)
    except Exception as e:
        errors: List[Exception] = []
        errors.append(e)
        try:
            data = vars(obj)
        except Exception as e:
            errors.append(e)
            raise ValueError(errors) from e
    return jsonable_encoder(data, custom_encoder=custom_encoder)
diff --git a/src/gooey/core/pydantic_utilities.py b/src/gooey/core/pydantic_utilities.py
new file mode 100644
index 0000000..a72c1a5
--- /dev/null
+++ b/src/gooey/core/pydantic_utilities.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+
+IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
+
+if IS_PYDANTIC_V2:
+ import pydantic.v1 as pydantic_v1 # type: ignore # nopycln: import
+else:
+ import pydantic as pydantic_v1 # type: ignore # nopycln: import
+
+
def deep_union_pydantic_dicts(
    source: typing.Dict[str, typing.Any], destination: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
    """
    Recursively merge ``source`` into ``destination`` in place and return it.

    Nested dicts are merged key-by-key; any non-dict value in ``source``
    overwrites the corresponding entry in ``destination``.
    """
    for key, value in source.items():
        if not isinstance(value, dict):
            destination[key] = value
        else:
            # Descend into (or create) the nested dict and merge recursively.
            deep_union_pydantic_dicts(value, destination.setdefault(key, {}))
    return destination
+
+
+__all__ = ["pydantic_v1"]
diff --git a/src/gooey/core/query_encoder.py b/src/gooey/core/query_encoder.py
new file mode 100644
index 0000000..1f5f766
--- /dev/null
+++ b/src/gooey/core/query_encoder.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from collections import ChainMap
+from typing import Any, Dict, Optional
+
+from .pydantic_utilities import pydantic_v1
+
+
+# Flattens dicts to be of the form {"key[subkey][subkey2]": value} where value is not a dict
+def traverse_query_dict(dict_flat: Dict[str, Any], key_prefix: Optional[str] = None) -> Dict[str, Any]:
+ result = {}
+ for k, v in dict_flat.items():
+ key = f"{key_prefix}[{k}]" if key_prefix is not None else k
+ if isinstance(v, dict):
+ result.update(traverse_query_dict(v, key))
+ else:
+ result[key] = v
+ return result
+
+
def single_query_encoder(query_key: str, query_value: Any) -> Dict[str, Any]:
    """Encode one query parameter: pydantic models and dicts are flattened, scalars pass through unchanged."""
    if isinstance(query_value, pydantic_v1.BaseModel):
        return traverse_query_dict(query_value.dict(by_alias=True), query_key)
    if isinstance(query_value, dict):
        return traverse_query_dict(query_value, query_key)
    return {query_key: query_value}
+
+
def encode_query(query: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """Flatten a whole query dict via single_query_encoder; None passes through unchanged."""
    if query is None:
        return None
    # ChainMap preserves the original first-wins behavior on duplicate flattened keys.
    encoded_parts = [single_query_encoder(key, value) for key, value in query.items()]
    return dict(ChainMap(*encoded_parts))
diff --git a/src/gooey/core/remove_none_from_dict.py b/src/gooey/core/remove_none_from_dict.py
new file mode 100644
index 0000000..c229814
--- /dev/null
+++ b/src/gooey/core/remove_none_from_dict.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from typing import Any, Dict, Mapping, Optional
+
+
def remove_none_from_dict(original: Mapping[str, Optional[Any]]) -> Dict[str, Any]:
    """Return a new dict containing only the entries of ``original`` whose value is not None."""
    return {key: value for key, value in original.items() if value is not None}
diff --git a/src/gooey/core/request_options.py b/src/gooey/core/request_options.py
new file mode 100644
index 0000000..d0bf0db
--- /dev/null
+++ b/src/gooey/core/request_options.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+try:
+ from typing import NotRequired # type: ignore
+except ImportError:
+ from typing_extensions import NotRequired
+
+
class RequestOptions(typing.TypedDict, total=False):
    """
    Additional options for request-specific configuration when calling APIs via the SDK.
    This is used primarily as an optional final parameter for service functions.

    Attributes:
        - timeout_in_seconds: int. The number of seconds to await an API call before timing out.

        - max_retries: int. The max number of retries to attempt if the API call fails.

        - additional_headers: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's header dict

        - additional_query_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's query parameters dict

        - additional_body_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's body parameters dict
    """

    # All keys are optional (total=False); absent keys fall back to the
    # client-wide defaults configured on the wrapper.
    timeout_in_seconds: NotRequired[int]
    max_retries: NotRequired[int]
    additional_headers: NotRequired[typing.Dict[str, typing.Any]]
    additional_query_parameters: NotRequired[typing.Dict[str, typing.Any]]
    additional_body_parameters: NotRequired[typing.Dict[str, typing.Any]]
diff --git a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py
new file mode 100644
index 0000000..bfbb27a
--- /dev/null
+++ b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py
@@ -0,0 +1,716 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.run_settings import RunSettings
+from ..types.seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel
+from ..types.seo_summary_page_response import SeoSummaryPageResponse
+from ..types.seo_summary_page_status_response import SeoSummaryPageStatusResponse
+from ..types.serp_search_location import SerpSearchLocation
+from ..types.serp_search_type import SerpSearchType
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class CreateAPerfectSeoOptimizedTitleParagraphClient:
+    """Synchronous client for the SEOSummary recipe: run it (v2), queue an
+    async run (v3), and poll a run's status (v3). Auto-generated by Fern;
+    do not edit by hand."""
+
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        # The wrapper owns the configured httpx client used by every method below.
+        self._client_wrapper = client_wrapper
+
+    def seo_summary(
+        self,
+        *,
+        search_query: str,
+        keywords: str,
+        title: str,
+        company_url: str,
+        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
+        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
+        scaleserp_search_field: typing.Optional[str] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        enable_html: typing.Optional[bool] = OMIT,
+        selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        max_search_urls: typing.Optional[int] = OMIT,
+        enable_crosslinks: typing.Optional[bool] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> SeoSummaryPageResponse:
+        """
+        Parameters
+        ----------
+        search_query : str
+
+        keywords : str
+
+        title : str
+
+        company_url : str
+
+        serp_search_location : typing.Optional[SerpSearchLocation]
+
+        scaleserp_locations : typing.Optional[typing.Sequence[str]]
+            DEPRECATED: use `serp_search_location` instead
+
+        serp_search_type : typing.Optional[SerpSearchType]
+
+        scaleserp_search_field : typing.Optional[str]
+            DEPRECATED: use `serp_search_type` instead
+
+        task_instructions : typing.Optional[str]
+
+        enable_html : typing.Optional[bool]
+
+        selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel]
+
+        sampling_temperature : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        avoid_repetition : typing.Optional[bool]
+
+        max_search_urls : typing.Optional[int]
+
+        enable_crosslinks : typing.Optional[bool]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SeoSummaryPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
+            search_query="search_query",
+            keywords="keywords",
+            title="title",
+            company_url="company_url",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/SEOSummary/",
+            method="POST",
+            json={
+                "serp_search_location": serp_search_location,
+                "scaleserp_locations": scaleserp_locations,
+                "serp_search_type": serp_search_type,
+                "scaleserp_search_field": scaleserp_search_field,
+                "search_query": search_query,
+                "keywords": keywords,
+                "title": title,
+                "company_url": company_url,
+                "task_instructions": task_instructions,
+                "enable_html": enable_html,
+                "selected_model": selected_model,
+                "sampling_temperature": sampling_temperature,
+                "max_tokens": max_tokens,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "avoid_repetition": avoid_repetition,
+                "max_search_urls": max_search_urls,
+                "enable_crosslinks": enable_crosslinks,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            # OMIT is the "not provided" sentinel; passing it lets the wrapper
+            # distinguish unset optional fields from explicit None values
+            # (presumably dropping them from the JSON body — confirm in wrapper).
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(SeoSummaryPageResponse, _response.json())  # type: ignore
+            # Known error statuses map to typed exceptions; anything else falls
+            # through to the generic ApiError at the bottom.
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Non-JSON body: surface the raw response text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_seo_summary(
+        self,
+        *,
+        search_query: str,
+        keywords: str,
+        title: str,
+        company_url: str,
+        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
+        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
+        scaleserp_search_field: typing.Optional[str] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        enable_html: typing.Optional[bool] = OMIT,
+        selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        max_search_urls: typing.Optional[int] = OMIT,
+        enable_crosslinks: typing.Optional[bool] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        search_query : str
+
+        keywords : str
+
+        title : str
+
+        company_url : str
+
+        serp_search_location : typing.Optional[SerpSearchLocation]
+
+        scaleserp_locations : typing.Optional[typing.Sequence[str]]
+            DEPRECATED: use `serp_search_location` instead
+
+        serp_search_type : typing.Optional[SerpSearchType]
+
+        scaleserp_search_field : typing.Optional[str]
+            DEPRECATED: use `serp_search_type` instead
+
+        task_instructions : typing.Optional[str]
+
+        enable_html : typing.Optional[bool]
+
+        selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel]
+
+        sampling_temperature : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        avoid_repetition : typing.Optional[bool]
+
+        max_search_urls : typing.Optional[int]
+
+        enable_crosslinks : typing.Optional[bool]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
+            search_query="search_query",
+            keywords="keywords",
+            title="title",
+            company_url="company_url",
+        )
+        """
+        # Queues the run on the v3 endpoint; the returned model describes the
+        # queued run (poll it with status_seo_summary). "async" here refers to
+        # the server-side run, not to Python async — this method blocks.
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/SEOSummary/async/",
+            method="POST",
+            json={
+                "serp_search_location": serp_search_location,
+                "scaleserp_locations": scaleserp_locations,
+                "serp_search_type": serp_search_type,
+                "scaleserp_search_field": scaleserp_search_field,
+                "search_query": search_query,
+                "keywords": keywords,
+                "title": title,
+                "company_url": company_url,
+                "task_instructions": task_instructions,
+                "enable_html": enable_html,
+                "selected_model": selected_model,
+                "sampling_temperature": sampling_temperature,
+                "max_tokens": max_tokens,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "avoid_repetition": avoid_repetition,
+                "max_search_urls": max_search_urls,
+                "enable_crosslinks": enable_crosslinks,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            # NOTE: unlike the v2 run endpoint, 500 is not mapped to
+            # InternalServerError here; it falls through to ApiError below.
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def status_seo_summary(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> SeoSummaryPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SeoSummaryPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(
+            run_id="run_id",
+        )
+        """
+        # Polls the status of a run queued via async_seo_summary, keyed by run_id.
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/SEOSummary/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(SeoSummaryPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncCreateAPerfectSeoOptimizedTitleParagraphClient:
+    """Asyncio (awaitable) counterpart of CreateAPerfectSeoOptimizedTitleParagraphClient,
+    exposing the same three SEOSummary endpoints. Auto-generated by Fern; do not
+    edit by hand."""
+
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        # The wrapper owns the configured async httpx client used by every method below.
+        self._client_wrapper = client_wrapper
+
+    async def seo_summary(
+        self,
+        *,
+        search_query: str,
+        keywords: str,
+        title: str,
+        company_url: str,
+        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
+        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
+        scaleserp_search_field: typing.Optional[str] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        enable_html: typing.Optional[bool] = OMIT,
+        selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        max_search_urls: typing.Optional[int] = OMIT,
+        enable_crosslinks: typing.Optional[bool] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> SeoSummaryPageResponse:
+        """
+        Parameters
+        ----------
+        search_query : str
+
+        keywords : str
+
+        title : str
+
+        company_url : str
+
+        serp_search_location : typing.Optional[SerpSearchLocation]
+
+        scaleserp_locations : typing.Optional[typing.Sequence[str]]
+            DEPRECATED: use `serp_search_location` instead
+
+        serp_search_type : typing.Optional[SerpSearchType]
+
+        scaleserp_search_field : typing.Optional[str]
+            DEPRECATED: use `serp_search_type` instead
+
+        task_instructions : typing.Optional[str]
+
+        enable_html : typing.Optional[bool]
+
+        selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel]
+
+        sampling_temperature : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        avoid_repetition : typing.Optional[bool]
+
+        max_search_urls : typing.Optional[int]
+
+        enable_crosslinks : typing.Optional[bool]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SeoSummaryPageResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.create_a_perfect_seo_optimized_title_paragraph.seo_summary(
+                search_query="search_query",
+                keywords="keywords",
+                title="title",
+                company_url="company_url",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v2/SEOSummary/",
+            method="POST",
+            json={
+                "serp_search_location": serp_search_location,
+                "scaleserp_locations": scaleserp_locations,
+                "serp_search_type": serp_search_type,
+                "scaleserp_search_field": scaleserp_search_field,
+                "search_query": search_query,
+                "keywords": keywords,
+                "title": title,
+                "company_url": company_url,
+                "task_instructions": task_instructions,
+                "enable_html": enable_html,
+                "selected_model": selected_model,
+                "sampling_temperature": sampling_temperature,
+                "max_tokens": max_tokens,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "avoid_repetition": avoid_repetition,
+                "max_search_urls": max_search_urls,
+                "enable_crosslinks": enable_crosslinks,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            # OMIT is the "not provided" sentinel; passing it lets the wrapper
+            # distinguish unset optional fields from explicit None values.
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(SeoSummaryPageResponse, _response.json())  # type: ignore
+            # Known error statuses map to typed exceptions; anything else falls
+            # through to the generic ApiError at the bottom.
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Non-JSON body: surface the raw response text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_seo_summary(
+        self,
+        *,
+        search_query: str,
+        keywords: str,
+        title: str,
+        company_url: str,
+        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
+        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
+        scaleserp_search_field: typing.Optional[str] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        enable_html: typing.Optional[bool] = OMIT,
+        selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        max_search_urls: typing.Optional[int] = OMIT,
+        enable_crosslinks: typing.Optional[bool] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        search_query : str
+
+        keywords : str
+
+        title : str
+
+        company_url : str
+
+        serp_search_location : typing.Optional[SerpSearchLocation]
+
+        scaleserp_locations : typing.Optional[typing.Sequence[str]]
+            DEPRECATED: use `serp_search_location` instead
+
+        serp_search_type : typing.Optional[SerpSearchType]
+
+        scaleserp_search_field : typing.Optional[str]
+            DEPRECATED: use `serp_search_type` instead
+
+        task_instructions : typing.Optional[str]
+
+        enable_html : typing.Optional[bool]
+
+        selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel]
+
+        sampling_temperature : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        avoid_repetition : typing.Optional[bool]
+
+        max_search_urls : typing.Optional[int]
+
+        enable_crosslinks : typing.Optional[bool]
+
+        seed : typing.Optional[int]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.create_a_perfect_seo_optimized_title_paragraph.async_seo_summary(
+                search_query="search_query",
+                keywords="keywords",
+                title="title",
+                company_url="company_url",
+            )
+
+
+        asyncio.run(main())
+        """
+        # Queues the run on the v3 endpoint; poll with status_seo_summary.
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/SEOSummary/async/",
+            method="POST",
+            json={
+                "serp_search_location": serp_search_location,
+                "scaleserp_locations": scaleserp_locations,
+                "serp_search_type": serp_search_type,
+                "scaleserp_search_field": scaleserp_search_field,
+                "search_query": search_query,
+                "keywords": keywords,
+                "title": title,
+                "company_url": company_url,
+                "task_instructions": task_instructions,
+                "enable_html": enable_html,
+                "selected_model": selected_model,
+                "sampling_temperature": sampling_temperature,
+                "max_tokens": max_tokens,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "avoid_repetition": avoid_repetition,
+                "max_search_urls": max_search_urls,
+                "enable_crosslinks": enable_crosslinks,
+                "seed": seed,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            # NOTE: unlike the v2 run endpoint, 500 is not mapped to
+            # InternalServerError here; it falls through to ApiError below.
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_seo_summary(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> SeoSummaryPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SeoSummaryPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        # Polls the status of a run queued via async_seo_summary, keyed by run_id.
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/SEOSummary/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(SeoSummaryPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/edit_an_image_with_ai_prompt/__init__.py b/src/gooey/edit_an_image_with_ai_prompt/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/edit_an_image_with_ai_prompt/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/edit_an_image_with_ai_prompt/client.py b/src/gooey/edit_an_image_with_ai_prompt/client.py
new file mode 100644
index 0000000..c283d84
--- /dev/null
+++ b/src/gooey/edit_an_image_with_ai_prompt/client.py
@@ -0,0 +1,652 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
+from ..types.img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
+from ..types.img2img_page_response import Img2ImgPageResponse
+from ..types.img2img_page_status_response import Img2ImgPageStatusResponse
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class EditAnImageWithAiPromptClient:
+    """Synchronous client for the Img2Img recipe: run it (v2), queue an async
+    run (v3), and poll a run's status (v3). Auto-generated by Fern; do not
+    edit by hand."""
+
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        # The wrapper owns the configured httpx client used by every method below.
+        self._client_wrapper = client_wrapper
+
+    def img2img(
+        self,
+        *,
+        input_image: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        text_prompt: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT,
+        selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        output_width: typing.Optional[int] = OMIT,
+        output_height: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        prompt_strength: typing.Optional[float] = OMIT,
+        controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        image_guidance_scale: typing.Optional[float] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> Img2ImgPageResponse:
+        """
+        Parameters
+        ----------
+        input_image : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        text_prompt : typing.Optional[str]
+
+        selected_model : typing.Optional[Img2ImgPageRequestSelectedModel]
+
+        selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel]
+
+        negative_prompt : typing.Optional[str]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        output_width : typing.Optional[int]
+
+        output_height : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        prompt_strength : typing.Optional[float]
+
+        controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
+
+        seed : typing.Optional[int]
+
+        image_guidance_scale : typing.Optional[float]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        Img2ImgPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.edit_an_image_with_ai_prompt.img2img(
+            input_image="input_image",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/Img2Img/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "input_image": input_image,
+                "text_prompt": text_prompt,
+                "selected_model": selected_model,
+                "selected_controlnet_model": selected_controlnet_model,
+                "negative_prompt": negative_prompt,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "output_width": output_width,
+                "output_height": output_height,
+                "guidance_scale": guidance_scale,
+                "prompt_strength": prompt_strength,
+                "controlnet_conditioning_scale": controlnet_conditioning_scale,
+                "seed": seed,
+                "image_guidance_scale": image_guidance_scale,
+                "settings": settings,
+            },
+            request_options=request_options,
+            # OMIT is the "not provided" sentinel; passing it lets the wrapper
+            # distinguish unset optional fields from explicit None values.
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(Img2ImgPageResponse, _response.json())  # type: ignore
+            # Known error statuses map to typed exceptions; anything else falls
+            # through to the generic ApiError at the bottom.
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Non-JSON body: surface the raw response text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_img2img(
+        self,
+        *,
+        input_image: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        text_prompt: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT,
+        selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        output_width: typing.Optional[int] = OMIT,
+        output_height: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        prompt_strength: typing.Optional[float] = OMIT,
+        controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        image_guidance_scale: typing.Optional[float] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        input_image : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        text_prompt : typing.Optional[str]
+
+        selected_model : typing.Optional[Img2ImgPageRequestSelectedModel]
+
+        selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel]
+
+        negative_prompt : typing.Optional[str]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        output_width : typing.Optional[int]
+
+        output_height : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        prompt_strength : typing.Optional[float]
+
+        controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
+
+        seed : typing.Optional[int]
+
+        image_guidance_scale : typing.Optional[float]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.edit_an_image_with_ai_prompt.async_img2img(
+            input_image="input_image",
+        )
+        """
+        # Queues the run on the v3 endpoint; the returned model describes the
+        # queued run (poll it with status_img2img). "async" here refers to the
+        # server-side run, not to Python async — this method blocks.
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/Img2Img/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "input_image": input_image,
+                "text_prompt": text_prompt,
+                "selected_model": selected_model,
+                "selected_controlnet_model": selected_controlnet_model,
+                "negative_prompt": negative_prompt,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "output_width": output_width,
+                "output_height": output_height,
+                "guidance_scale": guidance_scale,
+                "prompt_strength": prompt_strength,
+                "controlnet_conditioning_scale": controlnet_conditioning_scale,
+                "seed": seed,
+                "image_guidance_scale": image_guidance_scale,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            # NOTE: unlike the v2 run endpoint, 500 is not mapped to
+            # InternalServerError here; it falls through to ApiError below.
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def status_img2img(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> Img2ImgPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        Img2ImgPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.edit_an_image_with_ai_prompt.status_img2img(
+            run_id="run_id",
+        )
+        """
+        # Polls the status of a run queued via async_img2img, keyed by run_id.
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/Img2Img/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(Img2ImgPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
class AsyncEditAnImageWithAiPromptClient:
    """Async client for the "Edit An Image With AI Prompt" (Img2Img) endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    @staticmethod
    def _translate_response(response, success_type: typing.Any, *, may_raise_500: bool = False) -> typing.Any:
        """Convert an HTTP response into a parsed model or a typed SDK error.

        2xx parses the body into ``success_type``; 402/422/429 (and 500 when
        ``may_raise_500`` is set) raise the corresponding typed exceptions;
        any other status — or an undecodable body — raises a plain ApiError.
        """
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(success_type, response.json())  # type: ignore
            if response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if may_raise_500 and response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    async def img2img(
        self,
        *,
        input_image: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        text_prompt: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT,
        selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT,
        negative_prompt: typing.Optional[str] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[int] = OMIT,
        output_width: typing.Optional[int] = OMIT,
        output_height: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        prompt_strength: typing.Optional[float] = OMIT,
        controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
        seed: typing.Optional[int] = OMIT,
        image_guidance_scale: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> Img2ImgPageResponse:
        """
        Run Img2Img synchronously and return the completed result.

        Parameters
        ----------
        input_image : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        text_prompt : typing.Optional[str]

        selected_model : typing.Optional[Img2ImgPageRequestSelectedModel]

        selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel]

        negative_prompt : typing.Optional[str]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[int]

        output_width : typing.Optional[int]

        output_height : typing.Optional[int]

        guidance_scale : typing.Optional[float]

        prompt_strength : typing.Optional[float]

        controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]

        seed : typing.Optional[int]

        image_guidance_scale : typing.Optional[float]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        Img2ImgPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.edit_an_image_with_ai_prompt.img2img(
                input_image="input_image",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v2/Img2Img/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "input_image": input_image,
                "text_prompt": text_prompt,
                "selected_model": selected_model,
                "selected_controlnet_model": selected_controlnet_model,
                "negative_prompt": negative_prompt,
                "num_outputs": num_outputs,
                "quality": quality,
                "output_width": output_width,
                "output_height": output_height,
                "guidance_scale": guidance_scale,
                "prompt_strength": prompt_strength,
                "controlnet_conditioning_scale": controlnet_conditioning_scale,
                "seed": seed,
                "image_guidance_scale": image_guidance_scale,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous-run endpoint can also report a 500 failure payload.
        return self._translate_response(response, Img2ImgPageResponse, may_raise_500=True)

    async def async_img2img(
        self,
        *,
        input_image: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        text_prompt: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT,
        selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT,
        negative_prompt: typing.Optional[str] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[int] = OMIT,
        output_width: typing.Optional[int] = OMIT,
        output_height: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        prompt_strength: typing.Optional[float] = OMIT,
        controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
        seed: typing.Optional[int] = OMIT,
        image_guidance_scale: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start an Img2Img run on the server and return a handle for polling.

        Parameters
        ----------
        input_image : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        text_prompt : typing.Optional[str]

        selected_model : typing.Optional[Img2ImgPageRequestSelectedModel]

        selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel]

        negative_prompt : typing.Optional[str]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[int]

        output_width : typing.Optional[int]

        output_height : typing.Optional[int]

        guidance_scale : typing.Optional[float]

        prompt_strength : typing.Optional[float]

        controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]

        seed : typing.Optional[int]

        image_guidance_scale : typing.Optional[float]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.edit_an_image_with_ai_prompt.async_img2img(
                input_image="input_image",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v3/Img2Img/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "input_image": input_image,
                "text_prompt": text_prompt,
                "selected_model": selected_model,
                "selected_controlnet_model": selected_controlnet_model,
                "negative_prompt": negative_prompt,
                "num_outputs": num_outputs,
                "quality": quality,
                "output_width": output_width,
                "output_height": output_height,
                "guidance_scale": guidance_scale,
                "prompt_strength": prompt_strength,
                "controlnet_conditioning_scale": controlnet_conditioning_scale,
                "seed": seed,
                "image_guidance_scale": image_guidance_scale,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return self._translate_response(response, AsyncApiResponseModelV3)

    async def status_img2img(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> Img2ImgPageStatusResponse:
        """
        Poll the status of a previously submitted async Img2Img run.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        Img2ImgPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.edit_an_image_with_ai_prompt.status_img2img(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v3/Img2Img/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._translate_response(response, Img2ImgPageStatusResponse)
diff --git a/src/gooey/embeddings/__init__.py b/src/gooey/embeddings/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/embeddings/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/embeddings/client.py b/src/gooey/embeddings/client.py
new file mode 100644
index 0000000..a144aa0
--- /dev/null
+++ b/src/gooey/embeddings/client.py
@@ -0,0 +1,459 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
+from ..types.embeddings_page_response import EmbeddingsPageResponse
+from ..types.embeddings_page_status_response import EmbeddingsPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
class EmbeddingsClient:
    """Synchronous client for the text-embeddings recipe endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    @staticmethod
    def _translate_response(response, success_type: typing.Any, *, may_raise_500: bool = False) -> typing.Any:
        """Convert an HTTP response into a parsed model or a typed SDK error.

        2xx parses the body into ``success_type``; 402/422/429 (and 500 when
        ``may_raise_500`` is set) raise the corresponding typed exceptions;
        any other status — or an undecodable body — raises a plain ApiError.
        """
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(success_type, response.json())  # type: ignore
            if response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if may_raise_500 and response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    def post(
        self,
        *,
        texts: typing.Sequence[str],
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> EmbeddingsPageResponse:
        """
        Compute embeddings synchronously and return the completed result.

        Parameters
        ----------
        texts : typing.Sequence[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        EmbeddingsPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.embeddings.post(
            texts=["texts"],
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v2/embeddings/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "texts": texts,
                "selected_model": selected_model,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous-run endpoint can also report a 500 failure payload.
        return self._translate_response(response, EmbeddingsPageResponse, may_raise_500=True)

    def async_embeddings(
        self,
        *,
        texts: typing.Sequence[str],
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start an embeddings run on the server and return a handle for polling.

        Parameters
        ----------
        texts : typing.Sequence[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.embeddings.async_embeddings(
            texts=["texts"],
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v3/embeddings/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "texts": texts,
                "selected_model": selected_model,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return self._translate_response(response, AsyncApiResponseModelV3)

    def status_embeddings(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> EmbeddingsPageStatusResponse:
        """
        Poll the status of a previously submitted async embeddings run.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        EmbeddingsPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.embeddings.status_embeddings(
            run_id="run_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v3/embeddings/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._translate_response(response, EmbeddingsPageStatusResponse)
+
+
class AsyncEmbeddingsClient:
    """Async client for the text-embeddings recipe endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    @staticmethod
    def _translate_response(response, success_type: typing.Any, *, may_raise_500: bool = False) -> typing.Any:
        """Convert an HTTP response into a parsed model or a typed SDK error.

        2xx parses the body into ``success_type``; 402/422/429 (and 500 when
        ``may_raise_500`` is set) raise the corresponding typed exceptions;
        any other status — or an undecodable body — raises a plain ApiError.
        """
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(success_type, response.json())  # type: ignore
            if response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if may_raise_500 and response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    async def post(
        self,
        *,
        texts: typing.Sequence[str],
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> EmbeddingsPageResponse:
        """
        Compute embeddings synchronously and return the completed result.

        Parameters
        ----------
        texts : typing.Sequence[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        EmbeddingsPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.embeddings.post(
                texts=["texts"],
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v2/embeddings/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "texts": texts,
                "selected_model": selected_model,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous-run endpoint can also report a 500 failure payload.
        return self._translate_response(response, EmbeddingsPageResponse, may_raise_500=True)

    async def async_embeddings(
        self,
        *,
        texts: typing.Sequence[str],
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start an embeddings run on the server and return a handle for polling.

        Parameters
        ----------
        texts : typing.Sequence[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.embeddings.async_embeddings(
                texts=["texts"],
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v3/embeddings/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "texts": texts,
                "selected_model": selected_model,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return self._translate_response(response, AsyncApiResponseModelV3)

    async def status_embeddings(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> EmbeddingsPageStatusResponse:
        """
        Poll the status of a previously submitted async embeddings run.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        EmbeddingsPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.embeddings.status_embeddings(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v3/embeddings/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._translate_response(response, EmbeddingsPageStatusResponse)
diff --git a/src/gooey/environment.py b/src/gooey/environment.py
new file mode 100644
index 0000000..2d7ce64
--- /dev/null
+++ b/src/gooey/environment.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+
+
class GooeyEnvironment(enum.Enum):
    """Server environments the SDK can target; each value is a base URL."""

    DEFAULT = "https://api.gooey.ai"
diff --git a/src/gooey/errors/__init__.py b/src/gooey/errors/__init__.py
new file mode 100644
index 0000000..80b3d3e
--- /dev/null
+++ b/src/gooey/errors/__init__.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .internal_server_error import InternalServerError
+from .payment_required_error import PaymentRequiredError
+from .too_many_requests_error import TooManyRequestsError
+from .unprocessable_entity_error import UnprocessableEntityError
+
+__all__ = ["InternalServerError", "PaymentRequiredError", "TooManyRequestsError", "UnprocessableEntityError"]
diff --git a/src/gooey/errors/internal_server_error.py b/src/gooey/errors/internal_server_error.py
new file mode 100644
index 0000000..3be52c0
--- /dev/null
+++ b/src/gooey/errors/internal_server_error.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.api_error import ApiError
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+
+
class InternalServerError(ApiError):
    """Raised when the API responds with HTTP 500.

    Carries the parsed failure payload (note: ``FailedReponseModelV2`` is the
    generated model's actual spelling) as the error body.
    """

    def __init__(self, body: FailedReponseModelV2):
        # Status code is fixed at 500; callers supply only the parsed body.
        super().__init__(status_code=500, body=body)
diff --git a/src/gooey/errors/payment_required_error.py b/src/gooey/errors/payment_required_error.py
new file mode 100644
index 0000000..b0cc099
--- /dev/null
+++ b/src/gooey/errors/payment_required_error.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ..core.api_error import ApiError
+
+
class PaymentRequiredError(ApiError):
    """Raised when the API responds with HTTP 402 (insufficient credits).

    The 402 body has no dedicated model, so it is attached untyped.
    """

    def __init__(self, body: typing.Any):
        # Status code is fixed at 402; callers supply only the response body.
        super().__init__(status_code=402, body=body)
diff --git a/src/gooey/errors/too_many_requests_error.py b/src/gooey/errors/too_many_requests_error.py
new file mode 100644
index 0000000..81d358c
--- /dev/null
+++ b/src/gooey/errors/too_many_requests_error.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.api_error import ApiError
+from ..types.generic_error_response import GenericErrorResponse
+
+
class TooManyRequestsError(ApiError):
    """Raised when the API responds with HTTP 429 (rate limit exceeded).

    Carries the parsed ``GenericErrorResponse`` as the error body.
    """

    def __init__(self, body: GenericErrorResponse):
        # Status code is fixed at 429; callers supply only the parsed body.
        super().__init__(status_code=429, body=body)
diff --git a/src/gooey/errors/unprocessable_entity_error.py b/src/gooey/errors/unprocessable_entity_error.py
new file mode 100644
index 0000000..47470a7
--- /dev/null
+++ b/src/gooey/errors/unprocessable_entity_error.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.api_error import ApiError
+from ..types.http_validation_error import HttpValidationError
+
+
class UnprocessableEntityError(ApiError):
    """Raised when the API responds with HTTP 422 (request validation failed).

    Carries the parsed ``HttpValidationError`` describing the invalid fields.
    """

    def __init__(self, body: HttpValidationError):
        # Status code is fixed at 422; callers supply only the parsed body.
        super().__init__(status_code=422, body=body)
diff --git a/src/gooey/evaluator/__init__.py b/src/gooey/evaluator/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/evaluator/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/evaluator/client.py b/src/gooey/evaluator/client.py
new file mode 100644
index 0000000..3bb7e66
--- /dev/null
+++ b/src/gooey/evaluator/client.py
@@ -0,0 +1,601 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.agg_function import AggFunction
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel
+from ..types.bulk_eval_page_response import BulkEvalPageResponse
+from ..types.bulk_eval_page_status_response import BulkEvalPageStatusResponse
+from ..types.eval_prompt import EvalPrompt
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class EvaluatorClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def bulk_eval(
+ self,
+ *,
+ documents: typing.Sequence[str],
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT,
+ agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkEvalPageResponse:
+ """
+ Parameters
+ ----------
+ documents : typing.Sequence[str]
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions; for Art QR Code, it would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[BulkEvalPageRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]]
+ Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
+ _The `columns` dictionary can be used to reference the spreadsheet columns._
+
+
+ agg_functions : typing.Optional[typing.Sequence[AggFunction]]
+ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkEvalPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.evaluator.bulk_eval(
+ documents=["documents"],
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v2/bulk-eval/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "documents": documents,
+ "eval_prompts": eval_prompts,
+ "agg_functions": agg_functions,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(BulkEvalPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def async_bulk_eval(
+ self,
+ *,
+ documents: typing.Sequence[str],
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT,
+ agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ documents : typing.Sequence[str]
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions; for Art QR Code, it would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[BulkEvalPageRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]]
+ Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
+ _The `columns` dictionary can be used to reference the spreadsheet columns._
+
+
+ agg_functions : typing.Optional[typing.Sequence[AggFunction]]
+ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.evaluator.async_bulk_eval(
+ documents=["documents"],
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/bulk-eval/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "documents": documents,
+ "eval_prompts": eval_prompts,
+ "agg_functions": agg_functions,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def status_bulk_eval(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkEvalPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkEvalPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.evaluator.status_bulk_eval(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/bulk-eval/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(BulkEvalPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncEvaluatorClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def bulk_eval(
+ self,
+ *,
+ documents: typing.Sequence[str],
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT,
+ agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkEvalPageResponse:
+ """
+ Parameters
+ ----------
+ documents : typing.Sequence[str]
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions; for Art QR Code, it would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[BulkEvalPageRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]]
+ Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
+ _The `columns` dictionary can be used to reference the spreadsheet columns._
+
+
+ agg_functions : typing.Optional[typing.Sequence[AggFunction]]
+ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkEvalPageResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.evaluator.bulk_eval(
+ documents=["documents"],
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v2/bulk-eval/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "documents": documents,
+ "eval_prompts": eval_prompts,
+ "agg_functions": agg_functions,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(BulkEvalPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def async_bulk_eval(
+ self,
+ *,
+ documents: typing.Sequence[str],
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT,
+ agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ documents : typing.Sequence[str]
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions; for Art QR Code, it would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[BulkEvalPageRequestSelectedModel]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]]
+ Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
+ _The `columns` dictionary can be used to reference the spreadsheet columns._
+
+
+ agg_functions : typing.Optional[typing.Sequence[AggFunction]]
+ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.evaluator.async_bulk_eval(
+ documents=["documents"],
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/bulk-eval/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "selected_model": selected_model,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "documents": documents,
+ "eval_prompts": eval_prompts,
+ "agg_functions": agg_functions,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_bulk_eval(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> BulkEvalPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ BulkEvalPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.evaluator.status_bulk_eval(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/bulk-eval/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(BulkEvalPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/functions/__init__.py b/src/gooey/functions/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/functions/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/functions/client.py b/src/gooey/functions/client.py
new file mode 100644
index 0000000..53cced6
--- /dev/null
+++ b/src/gooey/functions/client.py
@@ -0,0 +1,405 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.functions_page_response import FunctionsPageResponse
+from ..types.functions_page_status_response import FunctionsPageStatusResponse
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class FunctionsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def post(
+ self,
+ *,
+ code: typing.Optional[str] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> FunctionsPageResponse:
+ """
+ Parameters
+ ----------
+ code : typing.Optional[str]
+ The JS code to be executed.
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used in the code
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FunctionsPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.functions.post()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v2/functions/",
+ method="POST",
+ json={"code": code, "variables": variables, "settings": settings},
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(FunctionsPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def async_functions(
+ self,
+ *,
+ code: typing.Optional[str] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ code : typing.Optional[str]
+ The JS code to be executed.
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used in the code
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.functions.async_functions()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/functions/async/",
+ method="POST",
+ json={"code": code, "variables": variables, "settings": settings},
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def status_functions(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> FunctionsPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FunctionsPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.functions.status_functions(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/functions/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(FunctionsPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncFunctionsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def post(
+ self,
+ *,
+ code: typing.Optional[str] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> FunctionsPageResponse:
+ """
+ Parameters
+ ----------
+ code : typing.Optional[str]
+ The JS code to be executed.
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used in the code
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FunctionsPageResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.functions.post()
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v2/functions/",
+ method="POST",
+ json={"code": code, "variables": variables, "settings": settings},
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(FunctionsPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def async_functions(
+ self,
+ *,
+ code: typing.Optional[str] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ code : typing.Optional[str]
+ The JS code to be executed.
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used in the code
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.functions.async_functions()
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/functions/async/",
+ method="POST",
+ json={"code": code, "variables": variables, "settings": settings},
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_functions(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> FunctionsPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ FunctionsPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.functions.status_functions(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/functions/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(FunctionsPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/generate_people_also_ask_seo_content/__init__.py b/src/gooey/generate_people_also_ask_seo_content/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/generate_people_also_ask_seo_content/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/generate_people_also_ask_seo_content/client.py b/src/gooey/generate_people_also_ask_seo_content/client.py
new file mode 100644
index 0000000..7af70bd
--- /dev/null
+++ b/src/gooey/generate_people_also_ask_seo_content/client.py
@@ -0,0 +1,770 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
+from ..types.related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel
+from ..types.related_qn_a_page_response import RelatedQnAPageResponse
+from ..types.related_qn_a_page_status_response import RelatedQnAPageStatusResponse
+from ..types.run_settings import RunSettings
+from ..types.serp_search_location import SerpSearchLocation
+from ..types.serp_search_type import SerpSearchType
+
# Sentinel default for optional parameters: ``...`` (Ellipsis) is distinguishable
# from an explicit ``None``. It is passed as ``omit=OMIT`` on every request call
# below so the client can identify fields the caller never supplied.
OMIT = typing.cast(typing.Any, ...)
+
+
class GeneratePeopleAlsoAskSeoContentClient:
    """Synchronous client for the "Generate People Also Ask SEO Content" recipe
    (the ``related-qna-maker`` endpoints): run it, start it via the v3 async
    endpoint, or poll a run's status."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # Shared wrapper supplying the configured httpx client.
        self._client_wrapper = client_wrapper

    def related_qna_maker(
        self,
        *,
        search_query: str,
        site_filter: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> RelatedQnAPageResponse:
        """
        Call ``POST v2/related-qna-maker/`` and return the parsed result.

        Parameters
        ----------
        search_query : str

        site_filter : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        max_search_urls : typing.Optional[int]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        RelatedQnAPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.generate_people_also_ask_seo_content.related_qna_maker(
            search_query="search_query",
            site_filter="site_filter",
        )
        """
        # Fields still equal to the OMIT sentinel are dropped from the payload
        # by the client wrapper (note ``omit=OMIT`` below).
        _response = self._client_wrapper.httpx_client.request(
            "v2/related-qna-maker/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(RelatedQnAPageResponse, _response.json())  # type: ignore
            # Known error statuses are mapped to typed exceptions.
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not valid JSON; surface the raw response text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        # Any other status code falls through to a generic ApiError with the JSON body.
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def async_related_qna_maker(
        self,
        *,
        search_query: str,
        site_filter: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Call ``POST v3/related-qna-maker/async/`` and return the parsed response.

        Parameters
        ----------
        search_query : str

        site_filter : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        max_search_urls : typing.Optional[int]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.generate_people_also_ask_seo_content.async_related_qna_maker(
            search_query="search_query",
            site_filter="site_filter",
        )
        """
        # Same payload as related_qna_maker; OMIT-valued fields are dropped (omit=OMIT).
        _response = self._client_wrapper.httpx_client.request(
            "v3/related-qna-maker/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
            # Known error statuses are mapped to typed exceptions.
            # NOTE(review): unlike related_qna_maker, no 500 mapping here —
            # presumably per the API definition; confirm against the generator spec.
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not valid JSON; surface the raw response text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        # Any other status code falls through to a generic ApiError with the JSON body.
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def status_related_qna_maker(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> RelatedQnAPageStatusResponse:
        """
        Call ``GET v3/related-qna-maker/status/`` for the given run id.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        RelatedQnAPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.generate_people_also_ask_seo_content.status_related_qna_maker(
            run_id="run_id",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v3/related-qna-maker/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(RelatedQnAPageStatusResponse, _response.json())  # type: ignore
            # Known error statuses are mapped to typed exceptions.
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not valid JSON; surface the raw response text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        # Any other status code falls through to a generic ApiError with the JSON body.
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
class AsyncGeneratePeopleAlsoAskSeoContentClient:
    """Asynchronous counterpart of ``GeneratePeopleAlsoAskSeoContentClient``:
    the same ``related-qna-maker`` endpoints, awaited on an async httpx client."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # Shared wrapper supplying the configured async httpx client.
        self._client_wrapper = client_wrapper

    async def related_qna_maker(
        self,
        *,
        search_query: str,
        site_filter: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> RelatedQnAPageResponse:
        """
        Call ``POST v2/related-qna-maker/`` and return the parsed result.

        Parameters
        ----------
        search_query : str

        site_filter : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        max_search_urls : typing.Optional[int]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        RelatedQnAPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.generate_people_also_ask_seo_content.related_qna_maker(
                search_query="search_query",
                site_filter="site_filter",
            )


        asyncio.run(main())
        """
        # Fields still equal to the OMIT sentinel are dropped from the payload
        # by the client wrapper (note ``omit=OMIT`` below).
        _response = await self._client_wrapper.httpx_client.request(
            "v2/related-qna-maker/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(RelatedQnAPageResponse, _response.json())  # type: ignore
            # Known error statuses are mapped to typed exceptions.
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not valid JSON; surface the raw response text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        # Any other status code falls through to a generic ApiError with the JSON body.
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def async_related_qna_maker(
        self,
        *,
        search_query: str,
        site_filter: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Call ``POST v3/related-qna-maker/async/`` and return the parsed response.

        Parameters
        ----------
        search_query : str

        site_filter : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        max_search_urls : typing.Optional[int]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.generate_people_also_ask_seo_content.async_related_qna_maker(
                search_query="search_query",
                site_filter="site_filter",
            )


        asyncio.run(main())
        """
        # Same payload as related_qna_maker; OMIT-valued fields are dropped (omit=OMIT).
        _response = await self._client_wrapper.httpx_client.request(
            "v3/related-qna-maker/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
            # Known error statuses are mapped to typed exceptions (no 500 mapping
            # here — see the sync variant; presumably per the API definition).
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not valid JSON; surface the raw response text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        # Any other status code falls through to a generic ApiError with the JSON body.
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def status_related_qna_maker(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> RelatedQnAPageStatusResponse:
        """
        Call ``GET v3/related-qna-maker/status/`` for the given run id.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        RelatedQnAPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.generate_people_also_ask_seo_content.status_related_qna_maker(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/related-qna-maker/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(RelatedQnAPageStatusResponse, _response.json())  # type: ignore
            # Known error statuses are mapped to typed exceptions.
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not valid JSON; surface the raw response text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        # Any other status code falls through to a generic ApiError with the JSON body.
        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/generate_product_photo_backgrounds/__init__.py b/src/gooey/generate_product_photo_backgrounds/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/generate_product_photo_backgrounds/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/generate_product_photo_backgrounds/client.py b/src/gooey/generate_product_photo_backgrounds/client.py
new file mode 100644
index 0000000..7fd0720
--- /dev/null
+++ b/src/gooey/generate_product_photo_backgrounds/client.py
@@ -0,0 +1,671 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
+from ..types.object_inpainting_page_response import ObjectInpaintingPageResponse
+from ..types.object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class GenerateProductPhotoBackgroundsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def object_inpainting(
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ sd2upscaling: typing.Optional[bool] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> ObjectInpaintingPageResponse:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ mask_threshold : typing.Optional[float]
+
+ selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ sd2upscaling : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ObjectInpaintingPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.generate_product_photo_backgrounds.object_inpainting(
+ input_image="input_image",
+ text_prompt="text_prompt",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v2/ObjectInpainting/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "mask_threshold": mask_threshold,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "sd_2_upscaling": sd2upscaling,
+ "seed": seed,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(ObjectInpaintingPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def async_object_inpainting(
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ sd2upscaling: typing.Optional[bool] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ mask_threshold : typing.Optional[float]
+
+ selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ sd2upscaling : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.generate_product_photo_backgrounds.async_object_inpainting(
+ input_image="input_image",
+ text_prompt="text_prompt",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/ObjectInpainting/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "mask_threshold": mask_threshold,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "sd_2_upscaling": sd2upscaling,
+ "seed": seed,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def status_object_inpainting(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ObjectInpaintingPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ObjectInpaintingPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.generate_product_photo_backgrounds.status_object_inpainting(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/ObjectInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(ObjectInpaintingPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncGenerateProductPhotoBackgroundsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def object_inpainting(
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ sd2upscaling: typing.Optional[bool] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> ObjectInpaintingPageResponse:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ mask_threshold : typing.Optional[float]
+
+ selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ sd2upscaling : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ObjectInpaintingPageResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.generate_product_photo_backgrounds.object_inpainting(
+ input_image="input_image",
+ text_prompt="text_prompt",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v2/ObjectInpainting/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "mask_threshold": mask_threshold,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "sd_2_upscaling": sd2upscaling,
+ "seed": seed,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(ObjectInpaintingPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def async_object_inpainting(
+ self,
+ *,
+ input_image: str,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ sd2upscaling: typing.Optional[bool] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ input_image : str
+
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ obj_scale : typing.Optional[float]
+
+ obj_pos_x : typing.Optional[float]
+
+ obj_pos_y : typing.Optional[float]
+
+ mask_threshold : typing.Optional[float]
+
+ selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ output_width : typing.Optional[int]
+
+ output_height : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ sd2upscaling : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.generate_product_photo_backgrounds.async_object_inpainting(
+ input_image="input_image",
+ text_prompt="text_prompt",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/ObjectInpainting/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_image": input_image,
+ "text_prompt": text_prompt,
+ "obj_scale": obj_scale,
+ "obj_pos_x": obj_pos_x,
+ "obj_pos_y": obj_pos_y,
+ "mask_threshold": mask_threshold,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "output_width": output_width,
+ "output_height": output_height,
+ "guidance_scale": guidance_scale,
+ "sd_2_upscaling": sd2upscaling,
+ "seed": seed,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_object_inpainting(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ObjectInpaintingPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ObjectInpaintingPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.generate_product_photo_backgrounds.status_object_inpainting(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/ObjectInpainting/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(ObjectInpaintingPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/large_language_models_gpt3/__init__.py b/src/gooey/large_language_models_gpt3/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/large_language_models_gpt3/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/large_language_models_gpt3/client.py b/src/gooey/large_language_models_gpt3/client.py
new file mode 100644
index 0000000..236d627
--- /dev/null
+++ b/src/gooey/large_language_models_gpt3/client.py
@@ -0,0 +1,548 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
+from ..types.compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem
+from ..types.compare_llm_page_response import CompareLlmPageResponse
+from ..types.compare_llm_page_status_response import CompareLlmPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class LargeLanguageModelsGpt3Client:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def compare_llm(
+ self,
+ *,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ input_prompt: typing.Optional[str] = OMIT,
+ selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareLlmPageResponse:
+ """
+ Parameters
+ ----------
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CompareLlmPageResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.large_language_models_gpt3.compare_llm()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v2/CompareLLM/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "selected_models": selected_models,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(CompareLlmPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def async_compare_llm(
+ self,
+ *,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ input_prompt: typing.Optional[str] = OMIT,
+ selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ input_prompt : typing.Optional[str]
+
+ selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]
+
+ avoid_repetition : typing.Optional[bool]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[float]
+
+ max_tokens : typing.Optional[int]
+
+ sampling_temperature : typing.Optional[float]
+
+ response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.large_language_models_gpt3.async_compare_llm()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/CompareLLM/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "input_prompt": input_prompt,
+ "selected_models": selected_models,
+ "avoid_repetition": avoid_repetition,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "max_tokens": max_tokens,
+ "sampling_temperature": sampling_temperature,
+ "response_format_type": response_format_type,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def status_compare_llm(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> CompareLlmPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CompareLlmPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ from gooey.client import Gooey
+
+ client = Gooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+ client.large_language_models_gpt3.status_compare_llm(
+ run_id="run_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v3/CompareLLM/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(CompareLlmPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
class AsyncLargeLanguageModelsGpt3Client:
    """Asynchronous client for the Gooey "Large Language Models (GPT-3)" recipe endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def compare_llm(
        self,
        *,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> CompareLlmPageResponse:
        """
        Run the CompareLLM recipe and wait for the result (POST ``v2/CompareLLM/``).

        Parameters
        ----------
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        input_prompt : typing.Optional[str]
        selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]
        avoid_repetition : typing.Optional[bool]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[float]
        max_tokens : typing.Optional[int]
        sampling_temperature : typing.Optional[float]
        response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CompareLlmPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.large_language_models_gpt3.compare_llm()


        asyncio.run(main())
        """
        # Parameters left as OMIT are stripped from the payload by the client wrapper.
        payload = {
            "functions": functions,
            "variables": variables,
            "input_prompt": input_prompt,
            "selected_models": selected_models,
            "avoid_repetition": avoid_repetition,
            "num_outputs": num_outputs,
            "quality": quality,
            "max_tokens": max_tokens,
            "sampling_temperature": sampling_temperature,
            "response_format_type": response_format_type,
            "settings": settings,
        }
        response = await self._client_wrapper.httpx_client.request(
            "v2/CompareLLM/", method="POST", json=payload, request_options=request_options, omit=OMIT
        )
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(CompareLlmPageResponse, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if status == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            body_json = response.json()
        except JSONDecodeError:
            # Body was not JSON: report the raw text instead of a decode traceback.
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=body_json)

    async def async_compare_llm(
        self,
        *,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start a CompareLLM run without waiting for it (POST ``v3/CompareLLM/async/``);
        poll :meth:`status_compare_llm` with the returned run id.

        Parameters
        ----------
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        input_prompt : typing.Optional[str]
        selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]
        avoid_repetition : typing.Optional[bool]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[float]
        max_tokens : typing.Optional[int]
        sampling_temperature : typing.Optional[float]
        response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.large_language_models_gpt3.async_compare_llm()


        asyncio.run(main())
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "input_prompt": input_prompt,
            "selected_models": selected_models,
            "avoid_repetition": avoid_repetition,
            "num_outputs": num_outputs,
            "quality": quality,
            "max_tokens": max_tokens,
            "sampling_temperature": sampling_temperature,
            "response_format_type": response_format_type,
            "settings": settings,
        }
        response = await self._client_wrapper.httpx_client.request(
            "v3/CompareLLM/async/", method="POST", json=payload, request_options=request_options, omit=OMIT
        )
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            # NOTE(review): unlike compare_llm, no 500 -> InternalServerError mapping is
            # defined for this endpoint; a 500 falls through to the generic ApiError.
            body_json = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=body_json)

    async def status_compare_llm(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> CompareLlmPageStatusResponse:
        """
        Fetch the status of a CompareLLM run (GET ``v3/CompareLLM/status/``).

        Parameters
        ----------
        run_id : str
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CompareLlmPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.large_language_models_gpt3.status_compare_llm(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v3/CompareLLM/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(CompareLlmPageStatusResponse, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            body_json = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=body_json)
diff --git a/src/gooey/letter_writer/__init__.py b/src/gooey/letter_writer/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/letter_writer/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/letter_writer/client.py b/src/gooey/letter_writer/client.py
new file mode 100644
index 0000000..c5833b7
--- /dev/null
+++ b/src/gooey/letter_writer/client.py
@@ -0,0 +1,651 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.letter_writer_page_response import LetterWriterPageResponse
+from ..types.letter_writer_page_status_response import LetterWriterPageStatusResponse
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.training_data_model import TrainingDataModel
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
class LetterWriterClient:
    """Synchronous client for the Gooey "Letter Writer" recipe endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def letter_writer(
        self,
        *,
        action_id: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        prompt_header: typing.Optional[str] = OMIT,
        example_letters: typing.Optional[typing.Sequence[TrainingDataModel]] = OMIT,
        lm_selected_api: typing.Optional[str] = OMIT,
        lm_selected_engine: typing.Optional[str] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        lm_sampling_temperature: typing.Optional[float] = OMIT,
        api_http_method: typing.Optional[str] = OMIT,
        api_url: typing.Optional[str] = OMIT,
        api_headers: typing.Optional[str] = OMIT,
        api_json_body: typing.Optional[str] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        strip_html2text: typing.Optional[bool] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> LetterWriterPageResponse:
        """
        Run the LetterWriter recipe and wait for the result (POST ``v2/LetterWriter/``).

        Parameters
        ----------
        action_id : str
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        prompt_header : typing.Optional[str]
        example_letters : typing.Optional[typing.Sequence[TrainingDataModel]]
        lm_selected_api : typing.Optional[str]
        lm_selected_engine : typing.Optional[str]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[float]
        lm_sampling_temperature : typing.Optional[float]
        api_http_method : typing.Optional[str]
        api_url : typing.Optional[str]
        api_headers : typing.Optional[str]
        api_json_body : typing.Optional[str]
        input_prompt : typing.Optional[str]
        strip_html2text : typing.Optional[bool]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LetterWriterPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.letter_writer.letter_writer(
            action_id="action_id",
        )
        """
        # Parameters left as OMIT are stripped from the payload by the client wrapper.
        # The wire name "strip_html_2_text" intentionally differs from the parameter name.
        payload = {
            "functions": functions,
            "variables": variables,
            "action_id": action_id,
            "prompt_header": prompt_header,
            "example_letters": example_letters,
            "lm_selected_api": lm_selected_api,
            "lm_selected_engine": lm_selected_engine,
            "num_outputs": num_outputs,
            "quality": quality,
            "lm_sampling_temperature": lm_sampling_temperature,
            "api_http_method": api_http_method,
            "api_url": api_url,
            "api_headers": api_headers,
            "api_json_body": api_json_body,
            "input_prompt": input_prompt,
            "strip_html_2_text": strip_html2text,
            "settings": settings,
        }
        response = self._client_wrapper.httpx_client.request(
            "v2/LetterWriter/", method="POST", json=payload, request_options=request_options, omit=OMIT
        )
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(LetterWriterPageResponse, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if status == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            body_json = response.json()
        except JSONDecodeError:
            # Body was not JSON: report the raw text instead of a decode traceback.
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=body_json)

    def async_letter_writer(
        self,
        *,
        action_id: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        prompt_header: typing.Optional[str] = OMIT,
        example_letters: typing.Optional[typing.Sequence[TrainingDataModel]] = OMIT,
        lm_selected_api: typing.Optional[str] = OMIT,
        lm_selected_engine: typing.Optional[str] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        lm_sampling_temperature: typing.Optional[float] = OMIT,
        api_http_method: typing.Optional[str] = OMIT,
        api_url: typing.Optional[str] = OMIT,
        api_headers: typing.Optional[str] = OMIT,
        api_json_body: typing.Optional[str] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        strip_html2text: typing.Optional[bool] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start a LetterWriter run without waiting for it (POST ``v3/LetterWriter/async/``);
        poll :meth:`status_letter_writer` with the returned run id.

        Parameters
        ----------
        action_id : str
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        prompt_header : typing.Optional[str]
        example_letters : typing.Optional[typing.Sequence[TrainingDataModel]]
        lm_selected_api : typing.Optional[str]
        lm_selected_engine : typing.Optional[str]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[float]
        lm_sampling_temperature : typing.Optional[float]
        api_http_method : typing.Optional[str]
        api_url : typing.Optional[str]
        api_headers : typing.Optional[str]
        api_json_body : typing.Optional[str]
        input_prompt : typing.Optional[str]
        strip_html2text : typing.Optional[bool]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.letter_writer.async_letter_writer(
            action_id="action_id",
        )
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "action_id": action_id,
            "prompt_header": prompt_header,
            "example_letters": example_letters,
            "lm_selected_api": lm_selected_api,
            "lm_selected_engine": lm_selected_engine,
            "num_outputs": num_outputs,
            "quality": quality,
            "lm_sampling_temperature": lm_sampling_temperature,
            "api_http_method": api_http_method,
            "api_url": api_url,
            "api_headers": api_headers,
            "api_json_body": api_json_body,
            "input_prompt": input_prompt,
            "strip_html_2_text": strip_html2text,
            "settings": settings,
        }
        response = self._client_wrapper.httpx_client.request(
            "v3/LetterWriter/async/", method="POST", json=payload, request_options=request_options, omit=OMIT
        )
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            body_json = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=body_json)

    def status_letter_writer(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> LetterWriterPageStatusResponse:
        """
        Fetch the status of a LetterWriter run (GET ``v3/LetterWriter/status/``).

        Parameters
        ----------
        run_id : str
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LetterWriterPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.letter_writer.status_letter_writer(
            run_id="run_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v3/LetterWriter/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(LetterWriterPageStatusResponse, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            body_json = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=body_json)
+
+
class AsyncLetterWriterClient:
    """Asynchronous client for the Gooey "Letter Writer" recipe endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def letter_writer(
        self,
        *,
        action_id: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        prompt_header: typing.Optional[str] = OMIT,
        example_letters: typing.Optional[typing.Sequence[TrainingDataModel]] = OMIT,
        lm_selected_api: typing.Optional[str] = OMIT,
        lm_selected_engine: typing.Optional[str] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        lm_sampling_temperature: typing.Optional[float] = OMIT,
        api_http_method: typing.Optional[str] = OMIT,
        api_url: typing.Optional[str] = OMIT,
        api_headers: typing.Optional[str] = OMIT,
        api_json_body: typing.Optional[str] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        strip_html2text: typing.Optional[bool] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> LetterWriterPageResponse:
        """
        Run the LetterWriter recipe and wait for the result (POST ``v2/LetterWriter/``).

        Parameters
        ----------
        action_id : str
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        prompt_header : typing.Optional[str]
        example_letters : typing.Optional[typing.Sequence[TrainingDataModel]]
        lm_selected_api : typing.Optional[str]
        lm_selected_engine : typing.Optional[str]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[float]
        lm_sampling_temperature : typing.Optional[float]
        api_http_method : typing.Optional[str]
        api_url : typing.Optional[str]
        api_headers : typing.Optional[str]
        api_json_body : typing.Optional[str]
        input_prompt : typing.Optional[str]
        strip_html2text : typing.Optional[bool]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LetterWriterPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.letter_writer.letter_writer(
                action_id="action_id",
            )


        asyncio.run(main())
        """
        # Parameters left as OMIT are stripped from the payload by the client wrapper.
        # The wire name "strip_html_2_text" intentionally differs from the parameter name.
        payload = {
            "functions": functions,
            "variables": variables,
            "action_id": action_id,
            "prompt_header": prompt_header,
            "example_letters": example_letters,
            "lm_selected_api": lm_selected_api,
            "lm_selected_engine": lm_selected_engine,
            "num_outputs": num_outputs,
            "quality": quality,
            "lm_sampling_temperature": lm_sampling_temperature,
            "api_http_method": api_http_method,
            "api_url": api_url,
            "api_headers": api_headers,
            "api_json_body": api_json_body,
            "input_prompt": input_prompt,
            "strip_html_2_text": strip_html2text,
            "settings": settings,
        }
        response = await self._client_wrapper.httpx_client.request(
            "v2/LetterWriter/", method="POST", json=payload, request_options=request_options, omit=OMIT
        )
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(LetterWriterPageResponse, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if status == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            body_json = response.json()
        except JSONDecodeError:
            # Body was not JSON: report the raw text instead of a decode traceback.
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=body_json)

    async def async_letter_writer(
        self,
        *,
        action_id: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        prompt_header: typing.Optional[str] = OMIT,
        example_letters: typing.Optional[typing.Sequence[TrainingDataModel]] = OMIT,
        lm_selected_api: typing.Optional[str] = OMIT,
        lm_selected_engine: typing.Optional[str] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        lm_sampling_temperature: typing.Optional[float] = OMIT,
        api_http_method: typing.Optional[str] = OMIT,
        api_url: typing.Optional[str] = OMIT,
        api_headers: typing.Optional[str] = OMIT,
        api_json_body: typing.Optional[str] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        strip_html2text: typing.Optional[bool] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start a LetterWriter run without waiting for it (POST ``v3/LetterWriter/async/``);
        poll :meth:`status_letter_writer` with the returned run id.

        Parameters
        ----------
        action_id : str
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        prompt_header : typing.Optional[str]
        example_letters : typing.Optional[typing.Sequence[TrainingDataModel]]
        lm_selected_api : typing.Optional[str]
        lm_selected_engine : typing.Optional[str]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[float]
        lm_sampling_temperature : typing.Optional[float]
        api_http_method : typing.Optional[str]
        api_url : typing.Optional[str]
        api_headers : typing.Optional[str]
        api_json_body : typing.Optional[str]
        input_prompt : typing.Optional[str]
        strip_html2text : typing.Optional[bool]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.letter_writer.async_letter_writer(
                action_id="action_id",
            )


        asyncio.run(main())
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "action_id": action_id,
            "prompt_header": prompt_header,
            "example_letters": example_letters,
            "lm_selected_api": lm_selected_api,
            "lm_selected_engine": lm_selected_engine,
            "num_outputs": num_outputs,
            "quality": quality,
            "lm_sampling_temperature": lm_sampling_temperature,
            "api_http_method": api_http_method,
            "api_url": api_url,
            "api_headers": api_headers,
            "api_json_body": api_json_body,
            "input_prompt": input_prompt,
            "strip_html_2_text": strip_html2text,
            "settings": settings,
        }
        response = await self._client_wrapper.httpx_client.request(
            "v3/LetterWriter/async/", method="POST", json=payload, request_options=request_options, omit=OMIT
        )
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            body_json = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=body_json)

    async def status_letter_writer(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> LetterWriterPageStatusResponse:
        """
        Fetch the status of a LetterWriter run (GET ``v3/LetterWriter/status/``).

        Parameters
        ----------
        run_id : str
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LetterWriterPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.letter_writer.status_letter_writer(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v3/LetterWriter/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(LetterWriterPageStatusResponse, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            body_json = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=body_json)
diff --git a/src/gooey/lip_syncing/__init__.py b/src/gooey/lip_syncing/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/lip_syncing/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/lip_syncing/client.py b/src/gooey/lip_syncing/client.py
new file mode 100644
index 0000000..3d52f6e
--- /dev/null
+++ b/src/gooey/lip_syncing/client.py
@@ -0,0 +1,548 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
+from ..types.lipsync_page_response import LipsyncPageResponse
+from ..types.lipsync_page_status_response import LipsyncPageStatusResponse
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.sad_talker_settings import SadTalkerSettings
+
# Sentinel default for optional request parameters: `...` (Ellipsis) marks
# "caller did not supply a value". It is passed as ``omit=OMIT`` to the
# client wrapper's request call so unset fields can be dropped from the
# serialized request body (distinct from explicitly sending None/null).
OMIT = typing.cast(typing.Any, ...)
+
+
class LipSyncingClient:
    """Synchronous client for the Gooey Lip Syncing recipe endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _parse_or_raise(
        self, response, success_type: typing.Any, *, raise_on_500: bool = False
    ) -> typing.Any:
        """Translate an HTTP response into a parsed model or a typed error.

        Raises PaymentRequiredError (402), UnprocessableEntityError (422),
        TooManyRequestsError (429), InternalServerError (500, only when
        ``raise_on_500`` is set), and falls back to ApiError for any other
        status or a non-JSON body.
        """
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(success_type, response.json())  # type: ignore
            if response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if raise_on_500 and response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            body = response.json()
        except JSONDecodeError:
            # Body was not JSON at all; surface the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=body)

    def lipsync(
        self,
        *,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_face: typing.Optional[str] = OMIT,
        face_padding_top: typing.Optional[int] = OMIT,
        face_padding_bottom: typing.Optional[int] = OMIT,
        face_padding_left: typing.Optional[int] = OMIT,
        face_padding_right: typing.Optional[int] = OMIT,
        sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
        selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT,
        input_audio: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> LipsyncPageResponse:
        """Run the Lipsync recipe and wait for its result.

        Every keyword argument is an optional request field; arguments left
        at the OMIT sentinel are dropped from the JSON body. ``variables``
        holds Jinja prompt-template variables and function arguments.
        ``request_options`` carries request-specific configuration.

        Returns LipsyncPageResponse on success; raises PaymentRequiredError,
        UnprocessableEntityError, TooManyRequestsError, InternalServerError
        or ApiError otherwise.

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.lip_syncing.lipsync()
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "input_face": input_face,
            "face_padding_top": face_padding_top,
            "face_padding_bottom": face_padding_bottom,
            "face_padding_left": face_padding_left,
            "face_padding_right": face_padding_right,
            "sadtalker_settings": sadtalker_settings,
            "selected_model": selected_model,
            "input_audio": input_audio,
            "settings": settings,
        }
        response = self._client_wrapper.httpx_client.request(
            "v2/Lipsync/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        return self._parse_or_raise(response, LipsyncPageResponse, raise_on_500=True)

    def async_lipsync(
        self,
        *,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_face: typing.Optional[str] = OMIT,
        face_padding_top: typing.Optional[int] = OMIT,
        face_padding_bottom: typing.Optional[int] = OMIT,
        face_padding_left: typing.Optional[int] = OMIT,
        face_padding_right: typing.Optional[int] = OMIT,
        sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
        selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT,
        input_audio: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """Queue the Lipsync recipe for background execution.

        Accepts the same optional request fields as :meth:`lipsync`; fields
        left at the OMIT sentinel are dropped from the JSON body.

        Returns an AsyncApiResponseModelV3 handle whose run can be polled
        with :meth:`status_lipsync`. Raises PaymentRequiredError,
        UnprocessableEntityError, TooManyRequestsError or ApiError.

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.lip_syncing.async_lipsync()
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "input_face": input_face,
            "face_padding_top": face_padding_top,
            "face_padding_bottom": face_padding_bottom,
            "face_padding_left": face_padding_left,
            "face_padding_right": face_padding_right,
            "sadtalker_settings": sadtalker_settings,
            "selected_model": selected_model,
            "input_audio": input_audio,
            "settings": settings,
        }
        response = self._client_wrapper.httpx_client.request(
            "v3/Lipsync/async/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        return self._parse_or_raise(response, AsyncApiResponseModelV3)

    def status_lipsync(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> LipsyncPageStatusResponse:
        """Poll the status of a queued Lipsync run identified by ``run_id``.

        Returns LipsyncPageStatusResponse on success; raises
        PaymentRequiredError, UnprocessableEntityError, TooManyRequestsError
        or ApiError otherwise.

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.lip_syncing.status_lipsync(
            run_id="run_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v3/Lipsync/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._parse_or_raise(response, LipsyncPageStatusResponse)
+
+
class AsyncLipSyncingClient:
    """Asynchronous client for the Gooey Lip Syncing recipe endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _parse_or_raise(
        self, response, success_type: typing.Any, *, raise_on_500: bool = False
    ) -> typing.Any:
        """Translate an HTTP response into a parsed model or a typed error.

        Response decoding is synchronous even in the async client; only the
        request itself is awaited. Raises PaymentRequiredError (402),
        UnprocessableEntityError (422), TooManyRequestsError (429),
        InternalServerError (500, only when ``raise_on_500`` is set), and
        falls back to ApiError for any other status or a non-JSON body.
        """
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(success_type, response.json())  # type: ignore
            if response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if raise_on_500 and response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            body = response.json()
        except JSONDecodeError:
            # Body was not JSON at all; surface the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=body)

    async def lipsync(
        self,
        *,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_face: typing.Optional[str] = OMIT,
        face_padding_top: typing.Optional[int] = OMIT,
        face_padding_bottom: typing.Optional[int] = OMIT,
        face_padding_left: typing.Optional[int] = OMIT,
        face_padding_right: typing.Optional[int] = OMIT,
        sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
        selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT,
        input_audio: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> LipsyncPageResponse:
        """Run the Lipsync recipe and await its result.

        Every keyword argument is an optional request field; arguments left
        at the OMIT sentinel are dropped from the JSON body. ``variables``
        holds Jinja prompt-template variables and function arguments.

        Returns LipsyncPageResponse on success; raises PaymentRequiredError,
        UnprocessableEntityError, TooManyRequestsError, InternalServerError
        or ApiError otherwise.

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.lip_syncing.lipsync()


        asyncio.run(main())
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "input_face": input_face,
            "face_padding_top": face_padding_top,
            "face_padding_bottom": face_padding_bottom,
            "face_padding_left": face_padding_left,
            "face_padding_right": face_padding_right,
            "sadtalker_settings": sadtalker_settings,
            "selected_model": selected_model,
            "input_audio": input_audio,
            "settings": settings,
        }
        response = await self._client_wrapper.httpx_client.request(
            "v2/Lipsync/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        return self._parse_or_raise(response, LipsyncPageResponse, raise_on_500=True)

    async def async_lipsync(
        self,
        *,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_face: typing.Optional[str] = OMIT,
        face_padding_top: typing.Optional[int] = OMIT,
        face_padding_bottom: typing.Optional[int] = OMIT,
        face_padding_left: typing.Optional[int] = OMIT,
        face_padding_right: typing.Optional[int] = OMIT,
        sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
        selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT,
        input_audio: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """Queue the Lipsync recipe for background execution.

        Accepts the same optional request fields as :meth:`lipsync`; fields
        left at the OMIT sentinel are dropped from the JSON body.

        Returns an AsyncApiResponseModelV3 handle whose run can be polled
        with :meth:`status_lipsync`. Raises PaymentRequiredError,
        UnprocessableEntityError, TooManyRequestsError or ApiError.

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.lip_syncing.async_lipsync()


        asyncio.run(main())
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "input_face": input_face,
            "face_padding_top": face_padding_top,
            "face_padding_bottom": face_padding_bottom,
            "face_padding_left": face_padding_left,
            "face_padding_right": face_padding_right,
            "sadtalker_settings": sadtalker_settings,
            "selected_model": selected_model,
            "input_audio": input_audio,
            "settings": settings,
        }
        response = await self._client_wrapper.httpx_client.request(
            "v3/Lipsync/async/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        return self._parse_or_raise(response, AsyncApiResponseModelV3)

    async def status_lipsync(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> LipsyncPageStatusResponse:
        """Poll the status of a queued Lipsync run identified by ``run_id``.

        Returns LipsyncPageStatusResponse on success; raises
        PaymentRequiredError, UnprocessableEntityError, TooManyRequestsError
        or ApiError otherwise.

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.lip_syncing.status_lipsync(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v3/Lipsync/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._parse_or_raise(response, LipsyncPageStatusResponse)
diff --git a/src/gooey/lipsync_video_with_any_text/__init__.py b/src/gooey/lipsync_video_with_any_text/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/lipsync_video_with_any_text/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/lipsync_video_with_any_text/client.py b/src/gooey/lipsync_video_with_any_text/client.py
new file mode 100644
index 0000000..de7988e
--- /dev/null
+++ b/src/gooey/lipsync_video_with_any_text/client.py
@@ -0,0 +1,851 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
+from ..types.lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
+from ..types.lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
+from ..types.lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
+from ..types.lipsync_tts_page_response import LipsyncTtsPageResponse
+from ..types.lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.sad_talker_settings import SadTalkerSettings
+
# Sentinel default for optional request parameters: `...` (Ellipsis) marks
# "caller did not supply a value". It is passed as ``omit=OMIT`` to the
# client wrapper's request call so unset fields can be dropped from the
# serialized request body (distinct from explicitly sending None/null).
OMIT = typing.cast(typing.Any, ...)
+
+
class LipsyncVideoWithAnyTextClient:
    """Synchronous client for the Gooey Lipsync-TTS ("Lipsync Video With Any Text") endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _parse_or_raise(
        self, response, success_type: typing.Any, *, raise_on_500: bool = False
    ) -> typing.Any:
        """Translate an HTTP response into a parsed model or a typed error.

        Raises PaymentRequiredError (402), UnprocessableEntityError (422),
        TooManyRequestsError (429), InternalServerError (500, only when
        ``raise_on_500`` is set), and falls back to ApiError for any other
        status or a non-JSON body.
        """
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(success_type, response.json())  # type: ignore
            if response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if raise_on_500 and response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            body = response.json()
        except JSONDecodeError:
            # Body was not JSON at all; surface the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=body)

    def lipsync_tts(
        self,
        *,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT,
        uberduck_voice_name: typing.Optional[str] = OMIT,
        uberduck_speaking_rate: typing.Optional[float] = OMIT,
        google_voice_name: typing.Optional[str] = OMIT,
        google_speaking_rate: typing.Optional[float] = OMIT,
        google_pitch: typing.Optional[float] = OMIT,
        bark_history_prompt: typing.Optional[str] = OMIT,
        elevenlabs_voice_name: typing.Optional[str] = OMIT,
        elevenlabs_api_key: typing.Optional[str] = OMIT,
        elevenlabs_voice_id: typing.Optional[str] = OMIT,
        elevenlabs_model: typing.Optional[str] = OMIT,
        elevenlabs_stability: typing.Optional[float] = OMIT,
        elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
        elevenlabs_style: typing.Optional[float] = OMIT,
        elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
        azure_voice_name: typing.Optional[str] = OMIT,
        openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT,
        openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT,
        input_face: typing.Optional[str] = OMIT,
        face_padding_top: typing.Optional[int] = OMIT,
        face_padding_bottom: typing.Optional[int] = OMIT,
        face_padding_left: typing.Optional[int] = OMIT,
        face_padding_right: typing.Optional[int] = OMIT,
        sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
        selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> LipsyncTtsPageResponse:
        """Run the Lipsync-TTS recipe and wait for its result.

        ``text_prompt`` is the only required field. The remaining keyword
        arguments are optional TTS-provider and face-animation settings;
        arguments left at the OMIT sentinel are dropped from the JSON body.
        ``elevenlabs_voice_name`` is deprecated — use ``elevenlabs_voice_id``
        instead. ``variables`` holds Jinja prompt-template variables and
        function arguments.

        Returns LipsyncTtsPageResponse on success; raises
        PaymentRequiredError, UnprocessableEntityError, TooManyRequestsError,
        InternalServerError or ApiError otherwise.

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.lipsync_video_with_any_text.lipsync_tts(
            text_prompt="text_prompt",
        )
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "text_prompt": text_prompt,
            "tts_provider": tts_provider,
            "uberduck_voice_name": uberduck_voice_name,
            "uberduck_speaking_rate": uberduck_speaking_rate,
            "google_voice_name": google_voice_name,
            "google_speaking_rate": google_speaking_rate,
            "google_pitch": google_pitch,
            "bark_history_prompt": bark_history_prompt,
            "elevenlabs_voice_name": elevenlabs_voice_name,
            "elevenlabs_api_key": elevenlabs_api_key,
            "elevenlabs_voice_id": elevenlabs_voice_id,
            "elevenlabs_model": elevenlabs_model,
            "elevenlabs_stability": elevenlabs_stability,
            "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
            "elevenlabs_style": elevenlabs_style,
            "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
            "azure_voice_name": azure_voice_name,
            "openai_voice_name": openai_voice_name,
            "openai_tts_model": openai_tts_model,
            "input_face": input_face,
            "face_padding_top": face_padding_top,
            "face_padding_bottom": face_padding_bottom,
            "face_padding_left": face_padding_left,
            "face_padding_right": face_padding_right,
            "sadtalker_settings": sadtalker_settings,
            "selected_model": selected_model,
            "settings": settings,
        }
        response = self._client_wrapper.httpx_client.request(
            "v2/LipsyncTTS/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        return self._parse_or_raise(response, LipsyncTtsPageResponse, raise_on_500=True)

    def async_lipsync_tts(
        self,
        *,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT,
        uberduck_voice_name: typing.Optional[str] = OMIT,
        uberduck_speaking_rate: typing.Optional[float] = OMIT,
        google_voice_name: typing.Optional[str] = OMIT,
        google_speaking_rate: typing.Optional[float] = OMIT,
        google_pitch: typing.Optional[float] = OMIT,
        bark_history_prompt: typing.Optional[str] = OMIT,
        elevenlabs_voice_name: typing.Optional[str] = OMIT,
        elevenlabs_api_key: typing.Optional[str] = OMIT,
        elevenlabs_voice_id: typing.Optional[str] = OMIT,
        elevenlabs_model: typing.Optional[str] = OMIT,
        elevenlabs_stability: typing.Optional[float] = OMIT,
        elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
        elevenlabs_style: typing.Optional[float] = OMIT,
        elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
        azure_voice_name: typing.Optional[str] = OMIT,
        openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT,
        openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT,
        input_face: typing.Optional[str] = OMIT,
        face_padding_top: typing.Optional[int] = OMIT,
        face_padding_bottom: typing.Optional[int] = OMIT,
        face_padding_left: typing.Optional[int] = OMIT,
        face_padding_right: typing.Optional[int] = OMIT,
        sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
        selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """Queue the Lipsync-TTS recipe for background execution.

        Accepts the same fields as :meth:`lipsync_tts` (``text_prompt``
        required, everything else optional; OMIT-valued fields are dropped
        from the JSON body). ``elevenlabs_voice_name`` is deprecated — use
        ``elevenlabs_voice_id`` instead.

        Returns an AsyncApiResponseModelV3 handle whose run can be polled
        with :meth:`status_lipsync_tts`. Raises PaymentRequiredError,
        UnprocessableEntityError, TooManyRequestsError or ApiError.

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.lipsync_video_with_any_text.async_lipsync_tts(
            text_prompt="text_prompt",
        )
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "text_prompt": text_prompt,
            "tts_provider": tts_provider,
            "uberduck_voice_name": uberduck_voice_name,
            "uberduck_speaking_rate": uberduck_speaking_rate,
            "google_voice_name": google_voice_name,
            "google_speaking_rate": google_speaking_rate,
            "google_pitch": google_pitch,
            "bark_history_prompt": bark_history_prompt,
            "elevenlabs_voice_name": elevenlabs_voice_name,
            "elevenlabs_api_key": elevenlabs_api_key,
            "elevenlabs_voice_id": elevenlabs_voice_id,
            "elevenlabs_model": elevenlabs_model,
            "elevenlabs_stability": elevenlabs_stability,
            "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
            "elevenlabs_style": elevenlabs_style,
            "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
            "azure_voice_name": azure_voice_name,
            "openai_voice_name": openai_voice_name,
            "openai_tts_model": openai_tts_model,
            "input_face": input_face,
            "face_padding_top": face_padding_top,
            "face_padding_bottom": face_padding_bottom,
            "face_padding_left": face_padding_left,
            "face_padding_right": face_padding_right,
            "sadtalker_settings": sadtalker_settings,
            "selected_model": selected_model,
            "settings": settings,
        }
        response = self._client_wrapper.httpx_client.request(
            "v3/LipsyncTTS/async/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        return self._parse_or_raise(response, AsyncApiResponseModelV3)

    def status_lipsync_tts(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> LipsyncTtsPageStatusResponse:
        """Poll the status of a queued Lipsync-TTS run identified by ``run_id``.

        Returns LipsyncTtsPageStatusResponse on success; raises
        PaymentRequiredError, UnprocessableEntityError, TooManyRequestsError
        or ApiError otherwise.

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.lipsync_video_with_any_text.status_lipsync_tts(
            run_id="run_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v3/LipsyncTTS/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._parse_or_raise(response, LipsyncTtsPageStatusResponse)
+
+
+class AsyncLipsyncVideoWithAnyTextClient:  # async counterpart of the sync LipsyncTTS client above
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def lipsync_tts(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT,
+        uberduck_voice_name: typing.Optional[str] = OMIT,
+        uberduck_speaking_rate: typing.Optional[float] = OMIT,
+        google_voice_name: typing.Optional[str] = OMIT,
+        google_speaking_rate: typing.Optional[float] = OMIT,
+        google_pitch: typing.Optional[float] = OMIT,
+        bark_history_prompt: typing.Optional[str] = OMIT,
+        elevenlabs_voice_name: typing.Optional[str] = OMIT,
+        elevenlabs_api_key: typing.Optional[str] = OMIT,
+        elevenlabs_voice_id: typing.Optional[str] = OMIT,
+        elevenlabs_model: typing.Optional[str] = OMIT,
+        elevenlabs_stability: typing.Optional[float] = OMIT,
+        elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
+        elevenlabs_style: typing.Optional[float] = OMIT,
+        elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
+        azure_voice_name: typing.Optional[str] = OMIT,
+        openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT,
+        openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT,
+        input_face: typing.Optional[str] = OMIT,
+        face_padding_top: typing.Optional[int] = OMIT,
+        face_padding_bottom: typing.Optional[int] = OMIT,
+        face_padding_left: typing.Optional[int] = OMIT,
+        face_padding_right: typing.Optional[int] = OMIT,
+        sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+        selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> LipsyncTtsPageResponse:
+        """
+        Parameters
+        ----------
+        text_prompt : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider]
+
+        uberduck_voice_name : typing.Optional[str]
+
+        uberduck_speaking_rate : typing.Optional[float]
+
+        google_voice_name : typing.Optional[str]
+
+        google_speaking_rate : typing.Optional[float]
+
+        google_pitch : typing.Optional[float]
+
+        bark_history_prompt : typing.Optional[str]
+
+        elevenlabs_voice_name : typing.Optional[str]
+            Use `elevenlabs_voice_id` instead
+
+        elevenlabs_api_key : typing.Optional[str]
+
+        elevenlabs_voice_id : typing.Optional[str]
+
+        elevenlabs_model : typing.Optional[str]
+
+        elevenlabs_stability : typing.Optional[float]
+
+        elevenlabs_similarity_boost : typing.Optional[float]
+
+        elevenlabs_style : typing.Optional[float]
+
+        elevenlabs_speaker_boost : typing.Optional[bool]
+
+        azure_voice_name : typing.Optional[str]
+
+        openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]
+
+        openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]
+
+        input_face : typing.Optional[str]
+
+        face_padding_top : typing.Optional[int]
+
+        face_padding_bottom : typing.Optional[int]
+
+        face_padding_left : typing.Optional[int]
+
+        face_padding_right : typing.Optional[int]
+
+        sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+        selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        LipsyncTtsPageResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.lipsync_video_with_any_text.lipsync_tts(
+                text_prompt="text_prompt",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(  # v2 endpoint: blocks until the run completes
+            "v2/LipsyncTTS/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "text_prompt": text_prompt,
+                "tts_provider": tts_provider,
+                "uberduck_voice_name": uberduck_voice_name,
+                "uberduck_speaking_rate": uberduck_speaking_rate,
+                "google_voice_name": google_voice_name,
+                "google_speaking_rate": google_speaking_rate,
+                "google_pitch": google_pitch,
+                "bark_history_prompt": bark_history_prompt,
+                "elevenlabs_voice_name": elevenlabs_voice_name,
+                "elevenlabs_api_key": elevenlabs_api_key,
+                "elevenlabs_voice_id": elevenlabs_voice_id,
+                "elevenlabs_model": elevenlabs_model,
+                "elevenlabs_stability": elevenlabs_stability,
+                "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+                "elevenlabs_style": elevenlabs_style,
+                "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+                "azure_voice_name": azure_voice_name,
+                "openai_voice_name": openai_voice_name,
+                "openai_tts_model": openai_tts_model,
+                "input_face": input_face,
+                "face_padding_top": face_padding_top,
+                "face_padding_bottom": face_padding_bottom,
+                "face_padding_left": face_padding_left,
+                "face_padding_right": face_padding_right,
+                "sadtalker_settings": sadtalker_settings,
+                "selected_model": selected_model,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(LipsyncTtsPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:  # payment required: account out of credits
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:  # server-side request validation failed
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:  # rate limited
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:  # run was accepted but failed server-side
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:  # non-JSON error body: surface raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_lipsync_tts(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT,
+        uberduck_voice_name: typing.Optional[str] = OMIT,
+        uberduck_speaking_rate: typing.Optional[float] = OMIT,
+        google_voice_name: typing.Optional[str] = OMIT,
+        google_speaking_rate: typing.Optional[float] = OMIT,
+        google_pitch: typing.Optional[float] = OMIT,
+        bark_history_prompt: typing.Optional[str] = OMIT,
+        elevenlabs_voice_name: typing.Optional[str] = OMIT,
+        elevenlabs_api_key: typing.Optional[str] = OMIT,
+        elevenlabs_voice_id: typing.Optional[str] = OMIT,
+        elevenlabs_model: typing.Optional[str] = OMIT,
+        elevenlabs_stability: typing.Optional[float] = OMIT,
+        elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
+        elevenlabs_style: typing.Optional[float] = OMIT,
+        elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
+        azure_voice_name: typing.Optional[str] = OMIT,
+        openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT,
+        openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT,
+        input_face: typing.Optional[str] = OMIT,
+        face_padding_top: typing.Optional[int] = OMIT,
+        face_padding_bottom: typing.Optional[int] = OMIT,
+        face_padding_left: typing.Optional[int] = OMIT,
+        face_padding_right: typing.Optional[int] = OMIT,
+        sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+        selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        text_prompt : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider]
+
+        uberduck_voice_name : typing.Optional[str]
+
+        uberduck_speaking_rate : typing.Optional[float]
+
+        google_voice_name : typing.Optional[str]
+
+        google_speaking_rate : typing.Optional[float]
+
+        google_pitch : typing.Optional[float]
+
+        bark_history_prompt : typing.Optional[str]
+
+        elevenlabs_voice_name : typing.Optional[str]
+            Use `elevenlabs_voice_id` instead
+
+        elevenlabs_api_key : typing.Optional[str]
+
+        elevenlabs_voice_id : typing.Optional[str]
+
+        elevenlabs_model : typing.Optional[str]
+
+        elevenlabs_stability : typing.Optional[float]
+
+        elevenlabs_similarity_boost : typing.Optional[float]
+
+        elevenlabs_style : typing.Optional[float]
+
+        elevenlabs_speaker_boost : typing.Optional[bool]
+
+        azure_voice_name : typing.Optional[str]
+
+        openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]
+
+        openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]
+
+        input_face : typing.Optional[str]
+
+        face_padding_top : typing.Optional[int]
+
+        face_padding_bottom : typing.Optional[int]
+
+        face_padding_left : typing.Optional[int]
+
+        face_padding_right : typing.Optional[int]
+
+        sadtalker_settings : typing.Optional[SadTalkerSettings]
+
+        selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.lipsync_video_with_any_text.async_lipsync_tts(
+                text_prompt="text_prompt",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(  # v3 endpoint: queues the run and returns immediately
+            "v3/LipsyncTTS/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "text_prompt": text_prompt,
+                "tts_provider": tts_provider,
+                "uberduck_voice_name": uberduck_voice_name,
+                "uberduck_speaking_rate": uberduck_speaking_rate,
+                "google_voice_name": google_voice_name,
+                "google_speaking_rate": google_speaking_rate,
+                "google_pitch": google_pitch,
+                "bark_history_prompt": bark_history_prompt,
+                "elevenlabs_voice_name": elevenlabs_voice_name,
+                "elevenlabs_api_key": elevenlabs_api_key,
+                "elevenlabs_voice_id": elevenlabs_voice_id,
+                "elevenlabs_model": elevenlabs_model,
+                "elevenlabs_stability": elevenlabs_stability,
+                "elevenlabs_similarity_boost": elevenlabs_similarity_boost,
+                "elevenlabs_style": elevenlabs_style,
+                "elevenlabs_speaker_boost": elevenlabs_speaker_boost,
+                "azure_voice_name": azure_voice_name,
+                "openai_voice_name": openai_voice_name,
+                "openai_tts_model": openai_tts_model,
+                "input_face": input_face,
+                "face_padding_top": face_padding_top,
+                "face_padding_bottom": face_padding_bottom,
+                "face_padding_left": face_padding_left,
+                "face_padding_right": face_padding_right,
+                "sadtalker_settings": sadtalker_settings,
+                "selected_model": selected_model,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:  # NOTE(review): unlike lipsync_tts above, no 500 -> InternalServerError mapping here — confirm intended
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:  # payment required: account out of credits
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:  # server-side request validation failed
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:  # rate limited
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:  # non-JSON error body: surface raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_lipsync_tts(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> LipsyncTtsPageStatusResponse:
+        """
+        Poll the status of a previously queued LipsyncTTS run.
+
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        LipsyncTtsPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.lipsync_video_with_any_text.status_lipsync_tts(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(  # run_id identifies the queued async run
+            "v3/LipsyncTTS/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(LipsyncTtsPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:  # payment required: account out of credits
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:  # server-side request validation failed
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:  # rate limited
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:  # non-JSON error body: surface raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/misc/__init__.py b/src/gooey/misc/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/misc/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/misc/client.py b/src/gooey/misc/client.py
new file mode 100644
index 0000000..9c5f262
--- /dev/null
+++ b/src/gooey/misc/client.py
@@ -0,0 +1,351 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.balance_response import BalanceResponse
+from ..types.bot_broadcast_filters import BotBroadcastFilters
+from ..types.http_validation_error import HttpValidationError
+from ..types.reply_button import ReplyButton
+
+# sentinel used as the default value for optional parameters (lets the client distinguish "omitted" from an explicit None)
+OMIT = typing.cast(typing.Any, ...)
+
+
+class MiscClient:  # miscellaneous endpoints: balance, broadcast, health
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def get_balance(self, *, request_options: typing.Optional[RequestOptions] = None) -> BalanceResponse:
+        """
+        Parameters
+        ----------
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BalanceResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.misc.get_balance()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v1/balance/", method="GET", request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(BalanceResponse, _response.json())  # type: ignore
+            if _response.status_code == 422:  # server-side request validation failed
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:  # non-JSON error body: surface raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def video_bots_broadcast(
+        self,
+        *,
+        text: str,
+        example_id: typing.Optional[str] = None,
+        run_id: typing.Optional[str] = None,
+        audio: typing.Optional[str] = OMIT,
+        video: typing.Optional[str] = OMIT,
+        documents: typing.Optional[typing.Sequence[str]] = OMIT,
+        buttons: typing.Optional[typing.Sequence[ReplyButton]] = OMIT,
+        filters: typing.Optional[BotBroadcastFilters] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> typing.Any:
+        """
+        Parameters
+        ----------
+        text : str
+            Message to broadcast to all users
+
+        example_id : typing.Optional[str]
+
+        run_id : typing.Optional[str]
+
+        audio : typing.Optional[str]
+            Audio URL to send to all users
+
+        video : typing.Optional[str]
+            Video URL to send to all users
+
+        documents : typing.Optional[typing.Sequence[str]]
+            Document URLs to send to all users
+
+        buttons : typing.Optional[typing.Sequence[ReplyButton]]
+            Buttons to send to all users
+
+        filters : typing.Optional[BotBroadcastFilters]
+            Filters to select users to broadcast to. If not provided, will broadcast to all users of this bot.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        typing.Any
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.misc.video_bots_broadcast(
+            text="text",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/video-bots/broadcast/send/",
+            method="POST",
+            params={"example_id": example_id, "run_id": run_id},
+            json={
+                "text": text,
+                "audio": audio,
+                "video": video,
+                "documents": documents,
+                "buttons": buttons,
+                "filters": filters,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(typing.Any, _response.json())  # type: ignore
+            if _response.status_code == 422:  # server-side request validation failed
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:  # non-JSON error body: surface raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def health(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
+        """
+        Parameters
+        ----------
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        typing.Any
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.misc.health()
+        """
+        _response = self._client_wrapper.httpx_client.request(method="GET", request_options=request_options)  # NOTE(review): no path argument — request resolves to the client's base URL; confirm intended
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(typing.Any, _response.json())  # type: ignore
+            _response_json = _response.json()
+        except JSONDecodeError:  # non-JSON error body: surface raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncMiscClient:  # async counterpart of MiscClient above
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def get_balance(self, *, request_options: typing.Optional[RequestOptions] = None) -> BalanceResponse:
+        """
+        Parameters
+        ----------
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BalanceResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.misc.get_balance()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v1/balance/", method="GET", request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(BalanceResponse, _response.json())  # type: ignore
+            if _response.status_code == 422:  # server-side request validation failed
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:  # non-JSON error body: surface raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def video_bots_broadcast(
+        self,
+        *,
+        text: str,
+        example_id: typing.Optional[str] = None,
+        run_id: typing.Optional[str] = None,
+        audio: typing.Optional[str] = OMIT,
+        video: typing.Optional[str] = OMIT,
+        documents: typing.Optional[typing.Sequence[str]] = OMIT,
+        buttons: typing.Optional[typing.Sequence[ReplyButton]] = OMIT,
+        filters: typing.Optional[BotBroadcastFilters] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> typing.Any:
+        """
+        Parameters
+        ----------
+        text : str
+            Message to broadcast to all users
+
+        example_id : typing.Optional[str]
+
+        run_id : typing.Optional[str]
+
+        audio : typing.Optional[str]
+            Audio URL to send to all users
+
+        video : typing.Optional[str]
+            Video URL to send to all users
+
+        documents : typing.Optional[typing.Sequence[str]]
+            Document URLs to send to all users
+
+        buttons : typing.Optional[typing.Sequence[ReplyButton]]
+            Buttons to send to all users
+
+        filters : typing.Optional[BotBroadcastFilters]
+            Filters to select users to broadcast to. If not provided, will broadcast to all users of this bot.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        typing.Any
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.misc.video_bots_broadcast(
+                text="text",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v2/video-bots/broadcast/send/",
+            method="POST",
+            params={"example_id": example_id, "run_id": run_id},
+            json={
+                "text": text,
+                "audio": audio,
+                "video": video,
+                "documents": documents,
+                "buttons": buttons,
+                "filters": filters,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(typing.Any, _response.json())  # type: ignore
+            if _response.status_code == 422:  # server-side request validation failed
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:  # non-JSON error body: surface raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def health(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
+        """
+        Parameters
+        ----------
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        typing.Any
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.misc.health()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(method="GET", request_options=request_options)  # NOTE(review): no path argument — request resolves to the client's base URL; confirm intended
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(typing.Any, _response.json())  # type: ignore
+            _response_json = _response.json()
+        except JSONDecodeError:  # non-JSON error body: surface raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/people_also_ask_answers_from_a_doc/__init__.py b/src/gooey/people_also_ask_answers_from_a_doc/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/people_also_ask_answers_from_a_doc/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/people_also_ask_answers_from_a_doc/client.py b/src/gooey/people_also_ask_answers_from_a_doc/client.py
new file mode 100644
index 0000000..8bd5186
--- /dev/null
+++ b/src/gooey/people_also_ask_answers_from_a_doc/client.py
@@ -0,0 +1,800 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
+from ..types.related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel
+from ..types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
+from ..types.related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel
+from ..types.related_qn_a_doc_page_response import RelatedQnADocPageResponse
+from ..types.related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse
+from ..types.run_settings import RunSettings
+from ..types.serp_search_location import SerpSearchLocation
+from ..types.serp_search_type import SerpSearchType
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class PeopleAlsoAskAnswersFromADocClient:
+    """
+    Synchronous client for the "People Also Ask (Answers from a Doc)" recipe.
+
+    Wraps the shared httpx client and exposes one method per API route:
+    a blocking run (v2), an async-job submission (v3), and a job-status poll.
+    """
+
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        # The wrapper owns the configured httpx client and auth headers.
+        self._client_wrapper = client_wrapper
+
+    def related_qna_maker_doc(
+        self,
+        *,
+        search_query: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT,
+        documents: typing.Optional[typing.Sequence[str]] = OMIT,
+        max_references: typing.Optional[int] = OMIT,
+        max_context_words: typing.Optional[int] = OMIT,
+        scroll_jump: typing.Optional[int] = OMIT,
+        doc_extract_url: typing.Optional[str] = OMIT,
+        embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT,
+        dense_weight: typing.Optional[float] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        query_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT,
+        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
+        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
+        scaleserp_search_field: typing.Optional[str] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> RelatedQnADocPageResponse:
+        """
+        Parameters
+        ----------
+        search_query : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery]
+
+        documents : typing.Optional[typing.Sequence[str]]
+
+        max_references : typing.Optional[int]
+
+        max_context_words : typing.Optional[int]
+
+        scroll_jump : typing.Optional[int]
+
+        doc_extract_url : typing.Optional[str]
+
+        embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel]
+
+        dense_weight : typing.Optional[float]
+            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+        task_instructions : typing.Optional[str]
+
+        query_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle]
+
+        serp_search_location : typing.Optional[SerpSearchLocation]
+
+        scaleserp_locations : typing.Optional[typing.Sequence[str]]
+            DEPRECATED: use `serp_search_location` instead
+
+        serp_search_type : typing.Optional[SerpSearchType]
+
+        scaleserp_search_field : typing.Optional[str]
+            DEPRECATED: use `serp_search_type` instead
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        RelatedQnADocPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.people_also_ask_answers_from_a_doc.related_qna_maker_doc(
+            search_query="search_query",
+        )
+        """
+        # Parameters left at the OMIT sentinel are stripped from the JSON body
+        # by the client wrapper (omit=OMIT below).
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/related-qna-maker-doc/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "search_query": search_query,
+                "keyword_query": keyword_query,
+                "documents": documents,
+                "max_references": max_references,
+                "max_context_words": max_context_words,
+                "scroll_jump": scroll_jump,
+                "doc_extract_url": doc_extract_url,
+                "embedding_model": embedding_model,
+                "dense_weight": dense_weight,
+                "task_instructions": task_instructions,
+                "query_instructions": query_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "citation_style": citation_style,
+                "serp_search_location": serp_search_location,
+                "scaleserp_locations": scaleserp_locations,
+                "serp_search_type": serp_search_type,
+                "scaleserp_search_field": scaleserp_search_field,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses (402/422/429/500) to typed exceptions;
+        # any other status, or a non-JSON body, falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(RelatedQnADocPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_related_qna_maker_doc(
+        self,
+        *,
+        search_query: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT,
+        documents: typing.Optional[typing.Sequence[str]] = OMIT,
+        max_references: typing.Optional[int] = OMIT,
+        max_context_words: typing.Optional[int] = OMIT,
+        scroll_jump: typing.Optional[int] = OMIT,
+        doc_extract_url: typing.Optional[str] = OMIT,
+        embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT,
+        dense_weight: typing.Optional[float] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        query_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT,
+        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
+        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
+        scaleserp_search_field: typing.Optional[str] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        search_query : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery]
+
+        documents : typing.Optional[typing.Sequence[str]]
+
+        max_references : typing.Optional[int]
+
+        max_context_words : typing.Optional[int]
+
+        scroll_jump : typing.Optional[int]
+
+        doc_extract_url : typing.Optional[str]
+
+        embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel]
+
+        dense_weight : typing.Optional[float]
+            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+        task_instructions : typing.Optional[str]
+
+        query_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle]
+
+        serp_search_location : typing.Optional[SerpSearchLocation]
+
+        scaleserp_locations : typing.Optional[typing.Sequence[str]]
+            DEPRECATED: use `serp_search_location` instead
+
+        serp_search_type : typing.Optional[SerpSearchType]
+
+        scaleserp_search_field : typing.Optional[str]
+            DEPRECATED: use `serp_search_type` instead
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc(
+            search_query="search_query",
+        )
+        """
+        # Submits the run as a background job (v3 "async" route); poll
+        # status_related_qna_maker_doc with the returned run id for results.
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/related-qna-maker-doc/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "search_query": search_query,
+                "keyword_query": keyword_query,
+                "documents": documents,
+                "max_references": max_references,
+                "max_context_words": max_context_words,
+                "scroll_jump": scroll_jump,
+                "doc_extract_url": doc_extract_url,
+                "embedding_model": embedding_model,
+                "dense_weight": dense_weight,
+                "task_instructions": task_instructions,
+                "query_instructions": query_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "citation_style": citation_style,
+                "serp_search_location": serp_search_location,
+                "scaleserp_locations": scaleserp_locations,
+                "serp_search_type": serp_search_type,
+                "scaleserp_search_field": scaleserp_search_field,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses (402/422/429) to typed exceptions;
+        # any other status, or a non-JSON body, falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def status_related_qna_maker_doc(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> RelatedQnADocPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        RelatedQnADocPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc(
+            run_id="run_id",
+        )
+        """
+        # Poll the status of a job submitted via async_related_qna_maker_doc.
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/related-qna-maker-doc/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        # Map documented error statuses (402/422/429) to typed exceptions;
+        # any other status, or a non-JSON body, falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(RelatedQnADocPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncPeopleAlsoAskAnswersFromADocClient:
+    """
+    Asynchronous client for the "People Also Ask (Answers from a Doc)" recipe.
+
+    Mirrors PeopleAlsoAskAnswersFromADocClient with awaitable methods backed
+    by the shared async httpx client.
+    """
+
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        # The wrapper owns the configured async httpx client and auth headers.
+        self._client_wrapper = client_wrapper
+
+    async def related_qna_maker_doc(
+        self,
+        *,
+        search_query: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT,
+        documents: typing.Optional[typing.Sequence[str]] = OMIT,
+        max_references: typing.Optional[int] = OMIT,
+        max_context_words: typing.Optional[int] = OMIT,
+        scroll_jump: typing.Optional[int] = OMIT,
+        doc_extract_url: typing.Optional[str] = OMIT,
+        embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT,
+        dense_weight: typing.Optional[float] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        query_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT,
+        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
+        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
+        scaleserp_search_field: typing.Optional[str] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> RelatedQnADocPageResponse:
+        """
+        Parameters
+        ----------
+        search_query : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery]
+
+        documents : typing.Optional[typing.Sequence[str]]
+
+        max_references : typing.Optional[int]
+
+        max_context_words : typing.Optional[int]
+
+        scroll_jump : typing.Optional[int]
+
+        doc_extract_url : typing.Optional[str]
+
+        embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel]
+
+        dense_weight : typing.Optional[float]
+            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+        task_instructions : typing.Optional[str]
+
+        query_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle]
+
+        serp_search_location : typing.Optional[SerpSearchLocation]
+
+        scaleserp_locations : typing.Optional[typing.Sequence[str]]
+            DEPRECATED: use `serp_search_location` instead
+
+        serp_search_type : typing.Optional[SerpSearchType]
+
+        scaleserp_search_field : typing.Optional[str]
+            DEPRECATED: use `serp_search_type` instead
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        RelatedQnADocPageResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.people_also_ask_answers_from_a_doc.related_qna_maker_doc(
+                search_query="search_query",
+            )
+
+
+        asyncio.run(main())
+        """
+        # Parameters left at the OMIT sentinel are stripped from the JSON body
+        # by the client wrapper (omit=OMIT below).
+        _response = await self._client_wrapper.httpx_client.request(
+            "v2/related-qna-maker-doc/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "search_query": search_query,
+                "keyword_query": keyword_query,
+                "documents": documents,
+                "max_references": max_references,
+                "max_context_words": max_context_words,
+                "scroll_jump": scroll_jump,
+                "doc_extract_url": doc_extract_url,
+                "embedding_model": embedding_model,
+                "dense_weight": dense_weight,
+                "task_instructions": task_instructions,
+                "query_instructions": query_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "citation_style": citation_style,
+                "serp_search_location": serp_search_location,
+                "scaleserp_locations": scaleserp_locations,
+                "serp_search_type": serp_search_type,
+                "scaleserp_search_field": scaleserp_search_field,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses (402/422/429/500) to typed exceptions;
+        # any other status, or a non-JSON body, falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(RelatedQnADocPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_related_qna_maker_doc(
+        self,
+        *,
+        search_query: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT,
+        documents: typing.Optional[typing.Sequence[str]] = OMIT,
+        max_references: typing.Optional[int] = OMIT,
+        max_context_words: typing.Optional[int] = OMIT,
+        scroll_jump: typing.Optional[int] = OMIT,
+        doc_extract_url: typing.Optional[str] = OMIT,
+        embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT,
+        dense_weight: typing.Optional[float] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        query_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT,
+        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
+        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
+        scaleserp_search_field: typing.Optional[str] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        search_query : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery]
+
+        documents : typing.Optional[typing.Sequence[str]]
+
+        max_references : typing.Optional[int]
+
+        max_context_words : typing.Optional[int]
+
+        scroll_jump : typing.Optional[int]
+
+        doc_extract_url : typing.Optional[str]
+
+        embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel]
+
+        dense_weight : typing.Optional[float]
+            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+        task_instructions : typing.Optional[str]
+
+        query_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle]
+
+        serp_search_location : typing.Optional[SerpSearchLocation]
+
+        scaleserp_locations : typing.Optional[typing.Sequence[str]]
+            DEPRECATED: use `serp_search_location` instead
+
+        serp_search_type : typing.Optional[SerpSearchType]
+
+        scaleserp_search_field : typing.Optional[str]
+            DEPRECATED: use `serp_search_type` instead
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.people_also_ask_answers_from_a_doc.async_related_qna_maker_doc(
+                search_query="search_query",
+            )
+
+
+        asyncio.run(main())
+        """
+        # Submits the run as a background job (v3 "async" route); poll
+        # status_related_qna_maker_doc with the returned run id for results.
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/related-qna-maker-doc/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "search_query": search_query,
+                "keyword_query": keyword_query,
+                "documents": documents,
+                "max_references": max_references,
+                "max_context_words": max_context_words,
+                "scroll_jump": scroll_jump,
+                "doc_extract_url": doc_extract_url,
+                "embedding_model": embedding_model,
+                "dense_weight": dense_weight,
+                "task_instructions": task_instructions,
+                "query_instructions": query_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "citation_style": citation_style,
+                "serp_search_location": serp_search_location,
+                "scaleserp_locations": scaleserp_locations,
+                "serp_search_type": serp_search_type,
+                "scaleserp_search_field": scaleserp_search_field,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses (402/422/429) to typed exceptions;
+        # any other status, or a non-JSON body, falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_related_qna_maker_doc(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> RelatedQnADocPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        RelatedQnADocPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        # Poll the status of a job submitted via async_related_qna_maker_doc.
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/related-qna-maker-doc/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        # Map documented error statuses (402/422/429) to typed exceptions;
+        # any other status, or a non-JSON body, falls through to a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(RelatedQnADocPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py
new file mode 100644
index 0000000..c2fbb16
--- /dev/null
+++ b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py
@@ -0,0 +1,555 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel
+from ..types.social_lookup_email_page_response import SocialLookupEmailPageResponse
+from ..types.social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
class ProfileLookupGpt3ForAiPersonalizedEmailsClient:
    """Synchronous client for the SocialLookupEmail ("Profile Lookup + GPT-3 for
    AI-Personalized Emails") recipe endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        """Keep a reference to the shared HTTP client wrapper used for every call."""
        self._client_wrapper = client_wrapper

    def _translate_response(
        self, response: typing.Any, success_type: typing.Any, *, with_internal_server_error: bool = False
    ) -> typing.Any:
        """Parse *response* into *success_type*, or raise the matching typed error.

        Mapping: 402 -> PaymentRequiredError, 422 -> UnprocessableEntityError,
        429 -> TooManyRequestsError and, only when ``with_internal_server_error``
        is true, 500 -> InternalServerError.  Any other non-2xx status — or a
        body that is not valid JSON — is surfaced as a plain ``ApiError``.
        """
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(success_type, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if with_internal_server_error and status == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            unparsed_body = response.json()
        except JSONDecodeError:
            # The server did not return JSON; report the raw text instead.
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=unparsed_body)

    def social_lookup_email(
        self,
        *,
        email_address: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> SocialLookupEmailPageResponse:
        """
        Run the recipe synchronously via ``POST v2/SocialLookupEmail/``.

        Parameters
        ----------
        email_address : str
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments
        input_prompt : typing.Optional[str]
        selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel]
        num_outputs : typing.Optional[int]
        avoid_repetition : typing.Optional[bool]
        quality : typing.Optional[float]
        max_tokens : typing.Optional[int]
        sampling_temperature : typing.Optional[float]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SocialLookupEmailPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
            email_address="email_address",
        )
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "email_address": email_address,
            "input_prompt": input_prompt,
            "selected_model": selected_model,
            "num_outputs": num_outputs,
            "avoid_repetition": avoid_repetition,
            "quality": quality,
            "max_tokens": max_tokens,
            "sampling_temperature": sampling_temperature,
            "settings": settings,
        }
        response = self._client_wrapper.httpx_client.request(
            "v2/SocialLookupEmail/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous run endpoint can additionally fail with 500/FailedReponseModelV2.
        return self._translate_response(response, SocialLookupEmailPageResponse, with_internal_server_error=True)

    def async_social_lookup_email(
        self,
        *,
        email_address: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start a deferred run via ``POST v3/SocialLookupEmail/async/``; poll its state
        with :meth:`status_social_lookup_email`.

        Parameters
        ----------
        email_address : str
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments
        input_prompt : typing.Optional[str]
        selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel]
        num_outputs : typing.Optional[int]
        avoid_repetition : typing.Optional[bool]
        quality : typing.Optional[float]
        max_tokens : typing.Optional[int]
        sampling_temperature : typing.Optional[float]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
            email_address="email_address",
        )
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "email_address": email_address,
            "input_prompt": input_prompt,
            "selected_model": selected_model,
            "num_outputs": num_outputs,
            "avoid_repetition": avoid_repetition,
            "quality": quality,
            "max_tokens": max_tokens,
            "sampling_temperature": sampling_temperature,
            "settings": settings,
        }
        response = self._client_wrapper.httpx_client.request(
            "v3/SocialLookupEmail/async/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        return self._translate_response(response, AsyncApiResponseModelV3)

    def status_social_lookup_email(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> SocialLookupEmailPageStatusResponse:
        """
        Fetch the state of a deferred run via ``GET v3/SocialLookupEmail/status/``.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SocialLookupEmailPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(
            run_id="run_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v3/SocialLookupEmail/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._translate_response(response, SocialLookupEmailPageStatusResponse)
+
+
class AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient:
    """Asynchronous client for the SocialLookupEmail ("Profile Lookup + GPT-3 for
    AI-Personalized Emails") recipe endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        """Keep a reference to the shared async HTTP client wrapper."""
        self._client_wrapper = client_wrapper

    def _translate_response(
        self, response: typing.Any, success_type: typing.Any, *, with_internal_server_error: bool = False
    ) -> typing.Any:
        """Parse *response* into *success_type*, or raise the matching typed error.

        Plain (non-async) method: the response has already been awaited by the
        caller.  Mapping: 402 -> PaymentRequiredError, 422 -> UnprocessableEntityError,
        429 -> TooManyRequestsError and, only when ``with_internal_server_error``
        is true, 500 -> InternalServerError.  Anything else — or a non-JSON body —
        becomes a plain ``ApiError``.
        """
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(success_type, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if with_internal_server_error and status == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            unparsed_body = response.json()
        except JSONDecodeError:
            # The server did not return JSON; report the raw text instead.
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=unparsed_body)

    async def social_lookup_email(
        self,
        *,
        email_address: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> SocialLookupEmailPageResponse:
        """
        Run the recipe via ``POST v2/SocialLookupEmail/`` and await the result.

        Parameters
        ----------
        email_address : str
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments
        input_prompt : typing.Optional[str]
        selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel]
        num_outputs : typing.Optional[int]
        avoid_repetition : typing.Optional[bool]
        quality : typing.Optional[float]
        max_tokens : typing.Optional[int]
        sampling_temperature : typing.Optional[float]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SocialLookupEmailPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.profile_lookup_gpt3for_ai_personalized_emails.social_lookup_email(
                email_address="email_address",
            )


        asyncio.run(main())
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "email_address": email_address,
            "input_prompt": input_prompt,
            "selected_model": selected_model,
            "num_outputs": num_outputs,
            "avoid_repetition": avoid_repetition,
            "quality": quality,
            "max_tokens": max_tokens,
            "sampling_temperature": sampling_temperature,
            "settings": settings,
        }
        response = await self._client_wrapper.httpx_client.request(
            "v2/SocialLookupEmail/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous run endpoint can additionally fail with 500/FailedReponseModelV2.
        return self._translate_response(response, SocialLookupEmailPageResponse, with_internal_server_error=True)

    async def async_social_lookup_email(
        self,
        *,
        email_address: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        input_prompt: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start a deferred run via ``POST v3/SocialLookupEmail/async/``; poll its state
        with :meth:`status_social_lookup_email`.

        Parameters
        ----------
        email_address : str
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments
        input_prompt : typing.Optional[str]
        selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel]
        num_outputs : typing.Optional[int]
        avoid_repetition : typing.Optional[bool]
        quality : typing.Optional[float]
        max_tokens : typing.Optional[int]
        sampling_temperature : typing.Optional[float]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.profile_lookup_gpt3for_ai_personalized_emails.async_social_lookup_email(
                email_address="email_address",
            )


        asyncio.run(main())
        """
        payload = {
            "functions": functions,
            "variables": variables,
            "email_address": email_address,
            "input_prompt": input_prompt,
            "selected_model": selected_model,
            "num_outputs": num_outputs,
            "avoid_repetition": avoid_repetition,
            "quality": quality,
            "max_tokens": max_tokens,
            "sampling_temperature": sampling_temperature,
            "settings": settings,
        }
        response = await self._client_wrapper.httpx_client.request(
            "v3/SocialLookupEmail/async/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        return self._translate_response(response, AsyncApiResponseModelV3)

    async def status_social_lookup_email(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> SocialLookupEmailPageStatusResponse:
        """
        Fetch the state of a deferred run via ``GET v3/SocialLookupEmail/status/``.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SocialLookupEmailPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v3/SocialLookupEmail/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._translate_response(response, SocialLookupEmailPageStatusResponse)
diff --git a/src/gooey/py.typed b/src/gooey/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/src/gooey/render_image_search_results_with_ai/__init__.py b/src/gooey/render_image_search_results_with_ai/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/render_image_search_results_with_ai/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/render_image_search_results_with_ai/client.py b/src/gooey/render_image_search_results_with_ai/client.py
new file mode 100644
index 0000000..18363e7
--- /dev/null
+++ b/src/gooey/render_image_search_results_with_ai/client.py
@@ -0,0 +1,644 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
+from ..types.google_image_gen_page_response import GoogleImageGenPageResponse
+from ..types.google_image_gen_page_status_response import GoogleImageGenPageStatusResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.serp_search_location import SerpSearchLocation
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
class RenderImageSearchResultsWithAiClient:
    """Synchronous client for the GoogleImageGen ("Render Image Search Results
    with AI") recipe endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        """Keep a reference to the shared HTTP client wrapper used for every call."""
        self._client_wrapper = client_wrapper

    def _translate_response(
        self, response: typing.Any, success_type: typing.Any, *, with_internal_server_error: bool = False
    ) -> typing.Any:
        """Parse *response* into *success_type*, or raise the matching typed error.

        Mapping: 402 -> PaymentRequiredError, 422 -> UnprocessableEntityError,
        429 -> TooManyRequestsError and, only when ``with_internal_server_error``
        is true, 500 -> InternalServerError.  Any other non-2xx status — or a
        body that is not valid JSON — is surfaced as a plain ``ApiError``.
        """
        status = response.status_code
        try:
            if 200 <= status < 300:
                return pydantic_v1.parse_obj_as(success_type, response.json())  # type: ignore
            if status == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, response.json()))  # type: ignore
            if status == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, response.json())  # type: ignore
                )
            if status == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, response.json())  # type: ignore
                )
            if with_internal_server_error and status == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, response.json())  # type: ignore
                )
            unparsed_body = response.json()
        except JSONDecodeError:
            # The server did not return JSON; report the raw text instead.
            raise ApiError(status_code=status, body=response.text)
        raise ApiError(status_code=status, body=unparsed_body)

    def _build_payload(
        self,
        *,
        search_query: str,
        text_prompt: str,
        functions: typing.Any,
        variables: typing.Any,
        serp_search_location: typing.Any,
        scaleserp_locations: typing.Any,
        selected_model: typing.Any,
        negative_prompt: typing.Any,
        num_outputs: typing.Any,
        quality: typing.Any,
        guidance_scale: typing.Any,
        prompt_strength: typing.Any,
        sd2upscaling: typing.Any,
        seed: typing.Any,
        image_guidance_scale: typing.Any,
        settings: typing.Any,
    ) -> typing.Dict[str, typing.Any]:
        """Assemble the JSON body shared by the run and async-run endpoints.

        NOTE: the wire key for ``sd2upscaling`` is ``"sd_2_upscaling"``.
        """
        return {
            "functions": functions,
            "variables": variables,
            "serp_search_location": serp_search_location,
            "scaleserp_locations": scaleserp_locations,
            "search_query": search_query,
            "text_prompt": text_prompt,
            "selected_model": selected_model,
            "negative_prompt": negative_prompt,
            "num_outputs": num_outputs,
            "quality": quality,
            "guidance_scale": guidance_scale,
            "prompt_strength": prompt_strength,
            "sd_2_upscaling": sd2upscaling,
            "seed": seed,
            "image_guidance_scale": image_guidance_scale,
            "settings": settings,
        }

    def google_image_gen(
        self,
        *,
        search_query: str,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT,
        negative_prompt: typing.Optional[str] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        prompt_strength: typing.Optional[float] = OMIT,
        sd2upscaling: typing.Optional[bool] = OMIT,
        seed: typing.Optional[int] = OMIT,
        image_guidance_scale: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> GoogleImageGenPageResponse:
        """
        Run the recipe synchronously via ``POST v2/GoogleImageGen/``.

        Parameters
        ----------
        search_query : str
        text_prompt : str
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments
        serp_search_location : typing.Optional[SerpSearchLocation]
        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead
        selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel]
        negative_prompt : typing.Optional[str]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[int]
        guidance_scale : typing.Optional[float]
        prompt_strength : typing.Optional[float]
        sd2upscaling : typing.Optional[bool]
        seed : typing.Optional[int]
        image_guidance_scale : typing.Optional[float]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GoogleImageGenPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.render_image_search_results_with_ai.google_image_gen(
            search_query="search_query",
            text_prompt="text_prompt",
        )
        """
        payload = self._build_payload(
            search_query=search_query,
            text_prompt=text_prompt,
            functions=functions,
            variables=variables,
            serp_search_location=serp_search_location,
            scaleserp_locations=scaleserp_locations,
            selected_model=selected_model,
            negative_prompt=negative_prompt,
            num_outputs=num_outputs,
            quality=quality,
            guidance_scale=guidance_scale,
            prompt_strength=prompt_strength,
            sd2upscaling=sd2upscaling,
            seed=seed,
            image_guidance_scale=image_guidance_scale,
            settings=settings,
        )
        response = self._client_wrapper.httpx_client.request(
            "v2/GoogleImageGen/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous run endpoint can additionally fail with 500/FailedReponseModelV2.
        return self._translate_response(response, GoogleImageGenPageResponse, with_internal_server_error=True)

    def async_google_image_gen(
        self,
        *,
        search_query: str,
        text_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT,
        negative_prompt: typing.Optional[str] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        prompt_strength: typing.Optional[float] = OMIT,
        sd2upscaling: typing.Optional[bool] = OMIT,
        seed: typing.Optional[int] = OMIT,
        image_guidance_scale: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Start a deferred run via ``POST v3/GoogleImageGen/async/``; poll its state
        with :meth:`status_google_image_gen`.

        Parameters
        ----------
        search_query : str
        text_prompt : str
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments
        serp_search_location : typing.Optional[SerpSearchLocation]
        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead
        selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel]
        negative_prompt : typing.Optional[str]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[int]
        guidance_scale : typing.Optional[float]
        prompt_strength : typing.Optional[float]
        sd2upscaling : typing.Optional[bool]
        seed : typing.Optional[int]
        image_guidance_scale : typing.Optional[float]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.render_image_search_results_with_ai.async_google_image_gen(
            search_query="search_query",
            text_prompt="text_prompt",
        )
        """
        payload = self._build_payload(
            search_query=search_query,
            text_prompt=text_prompt,
            functions=functions,
            variables=variables,
            serp_search_location=serp_search_location,
            scaleserp_locations=scaleserp_locations,
            selected_model=selected_model,
            negative_prompt=negative_prompt,
            num_outputs=num_outputs,
            quality=quality,
            guidance_scale=guidance_scale,
            prompt_strength=prompt_strength,
            sd2upscaling=sd2upscaling,
            seed=seed,
            image_guidance_scale=image_guidance_scale,
            settings=settings,
        )
        response = self._client_wrapper.httpx_client.request(
            "v3/GoogleImageGen/async/",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        return self._translate_response(response, AsyncApiResponseModelV3)

    def status_google_image_gen(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> GoogleImageGenPageStatusResponse:
        """
        Fetch the state of a deferred run via ``GET v3/GoogleImageGen/status/``.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GoogleImageGenPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.render_image_search_results_with_ai.status_google_image_gen(
            run_id="run_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v3/GoogleImageGen/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._translate_response(response, GoogleImageGenPageStatusResponse)
+
+
+class AsyncRenderImageSearchResultsWithAiClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        """Store the shared async client wrapper used to issue HTTP requests."""
        self._client_wrapper = client_wrapper
+
+ async def google_image_gen(
+ self,
+ *,
+ search_query: str,
+ text_prompt: str,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
+ selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ prompt_strength: typing.Optional[float] = OMIT,
+ sd2upscaling: typing.Optional[bool] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ image_guidance_scale: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> GoogleImageGenPageResponse:
+ """
+ Parameters
+ ----------
+ search_query : str
+
+ text_prompt : str
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ serp_search_location : typing.Optional[SerpSearchLocation]
+
+ scaleserp_locations : typing.Optional[typing.Sequence[str]]
+ DEPRECATED: use `serp_search_location` instead
+
+ selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel]
+
+ negative_prompt : typing.Optional[str]
+
+ num_outputs : typing.Optional[int]
+
+ quality : typing.Optional[int]
+
+ guidance_scale : typing.Optional[float]
+
+ prompt_strength : typing.Optional[float]
+
+ sd2upscaling : typing.Optional[bool]
+
+ seed : typing.Optional[int]
+
+ image_guidance_scale : typing.Optional[float]
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GoogleImageGenPageResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.render_image_search_results_with_ai.google_image_gen(
+ search_query="search_query",
+ text_prompt="text_prompt",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v2/GoogleImageGen/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "serp_search_location": serp_search_location,
+ "scaleserp_locations": scaleserp_locations,
+ "search_query": search_query,
+ "text_prompt": text_prompt,
+ "selected_model": selected_model,
+ "negative_prompt": negative_prompt,
+ "num_outputs": num_outputs,
+ "quality": quality,
+ "guidance_scale": guidance_scale,
+ "prompt_strength": prompt_strength,
+ "sd_2_upscaling": sd2upscaling,
+ "seed": seed,
+ "image_guidance_scale": image_guidance_scale,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(GoogleImageGenPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_google_image_gen(
+        self,
+        *,
+        search_query: str,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
+        selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        prompt_strength: typing.Optional[float] = OMIT,
+        sd2upscaling: typing.Optional[bool] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        image_guidance_scale: typing.Optional[float] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        search_query : str
+            Query for the image-search step (inferred from the name; confirm against the API reference).
+        text_prompt : str
+            Prompt guiding the AI image generation (inferred from the name; confirm against the API reference).
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        serp_search_location : typing.Optional[SerpSearchLocation]
+
+        scaleserp_locations : typing.Optional[typing.Sequence[str]]
+            DEPRECATED: use `serp_search_location` instead
+
+        selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel]
+
+        negative_prompt : typing.Optional[str]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        prompt_strength : typing.Optional[float]
+
+        sd2upscaling : typing.Optional[bool]
+            Serialized as ``sd_2_upscaling`` in the request body (wire name differs from the parameter name).
+        seed : typing.Optional[int]
+
+        image_guidance_scale : typing.Optional[float]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.render_image_search_results_with_ai.async_google_image_gen(
+                search_query="search_query",
+                text_prompt="text_prompt",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/GoogleImageGen/async/",  # queues the run; poll via `status_google_image_gen` for the result
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "serp_search_location": serp_search_location,
+                "scaleserp_locations": scaleserp_locations,
+                "search_query": search_query,
+                "text_prompt": text_prompt,
+                "selected_model": selected_model,
+                "negative_prompt": negative_prompt,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "guidance_scale": guidance_scale,
+                "prompt_strength": prompt_strength,
+                "sd_2_upscaling": sd2upscaling,  # note: wire name differs from the `sd2upscaling` parameter
+                "seed": seed,
+                "image_guidance_scale": image_guidance_scale,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # unrecognized status: fall through to generic ApiError below
+        except JSONDecodeError:  # body was not JSON; surface the raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_google_image_gen(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> GoogleImageGenPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+            ID of a previously queued run; sent as the ``run_id`` query parameter.
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        GoogleImageGenPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.render_image_search_results_with_ai.status_google_image_gen(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/GoogleImageGen/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(GoogleImageGenPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # unrecognized status: fall through to generic ApiError below
+        except JSONDecodeError:  # body was not JSON; surface the raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/search_your_docs_with_gpt/__init__.py b/src/gooey/search_your_docs_with_gpt/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/search_your_docs_with_gpt/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/search_your_docs_with_gpt/client.py b/src/gooey/search_your_docs_with_gpt/client.py
new file mode 100644
index 0000000..0a940bc
--- /dev/null
+++ b/src/gooey/search_your_docs_with_gpt/client.py
@@ -0,0 +1,726 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
+from ..types.doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel
+from ..types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
+from ..types.doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel
+from ..types.doc_search_page_response import DocSearchPageResponse
+from ..types.doc_search_page_status_response import DocSearchPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class SearchYourDocsWithGptClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper  # shared httpx client + auth headers
+
+    def doc_search(
+        self,
+        *,
+        search_query: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT,
+        documents: typing.Optional[typing.Sequence[str]] = OMIT,
+        max_references: typing.Optional[int] = OMIT,
+        max_context_words: typing.Optional[int] = OMIT,
+        scroll_jump: typing.Optional[int] = OMIT,
+        doc_extract_url: typing.Optional[str] = OMIT,
+        embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT,
+        dense_weight: typing.Optional[float] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        query_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> DocSearchPageResponse:
+        """
+        Parameters
+        ----------
+        search_query : str
+            Query to run against the documents (inferred from the name; confirm with the API reference).
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery]
+
+        documents : typing.Optional[typing.Sequence[str]]
+
+        max_references : typing.Optional[int]
+
+        max_context_words : typing.Optional[int]
+
+        scroll_jump : typing.Optional[int]
+
+        doc_extract_url : typing.Optional[str]
+
+        embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel]
+
+        dense_weight : typing.Optional[float]
+            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+        task_instructions : typing.Optional[str]
+
+        query_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[DocSearchPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        citation_style : typing.Optional[DocSearchPageRequestCitationStyle]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DocSearchPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.search_your_docs_with_gpt.doc_search(
+            search_query="search_query",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/doc-search/",  # v2 endpoint: returns the completed recipe result directly
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "search_query": search_query,
+                "keyword_query": keyword_query,
+                "documents": documents,
+                "max_references": max_references,
+                "max_context_words": max_context_words,
+                "scroll_jump": scroll_jump,
+                "doc_extract_url": doc_extract_url,
+                "embedding_model": embedding_model,
+                "dense_weight": dense_weight,
+                "task_instructions": task_instructions,
+                "query_instructions": query_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "citation_style": citation_style,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DocSearchPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # unrecognized status: fall through to generic ApiError below
+        except JSONDecodeError:  # body was not JSON; surface the raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_doc_search(
+        self,
+        *,
+        search_query: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT,
+        documents: typing.Optional[typing.Sequence[str]] = OMIT,
+        max_references: typing.Optional[int] = OMIT,
+        max_context_words: typing.Optional[int] = OMIT,
+        scroll_jump: typing.Optional[int] = OMIT,
+        doc_extract_url: typing.Optional[str] = OMIT,
+        embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT,
+        dense_weight: typing.Optional[float] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        query_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        search_query : str
+            Query to run against the documents (inferred from the name; confirm with the API reference).
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery]
+
+        documents : typing.Optional[typing.Sequence[str]]
+
+        max_references : typing.Optional[int]
+
+        max_context_words : typing.Optional[int]
+
+        scroll_jump : typing.Optional[int]
+
+        doc_extract_url : typing.Optional[str]
+
+        embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel]
+
+        dense_weight : typing.Optional[float]
+            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+        task_instructions : typing.Optional[str]
+
+        query_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[DocSearchPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        citation_style : typing.Optional[DocSearchPageRequestCitationStyle]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.search_your_docs_with_gpt.async_doc_search(
+            search_query="search_query",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/doc-search/async/",  # queues the run; poll via `status_doc_search` for the result
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "search_query": search_query,
+                "keyword_query": keyword_query,
+                "documents": documents,
+                "max_references": max_references,
+                "max_context_words": max_context_words,
+                "scroll_jump": scroll_jump,
+                "doc_extract_url": doc_extract_url,
+                "embedding_model": embedding_model,
+                "dense_weight": dense_weight,
+                "task_instructions": task_instructions,
+                "query_instructions": query_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "citation_style": citation_style,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # (no 500 branch here, unlike doc_search — generated from the spec)
+        except JSONDecodeError:  # body was not JSON; surface the raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def status_doc_search(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> DocSearchPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+            ID of a previously queued run; sent as the ``run_id`` query parameter.
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DocSearchPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.search_your_docs_with_gpt.status_doc_search(
+            run_id="run_id",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/doc-search/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DocSearchPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # unrecognized status: fall through to generic ApiError below
+        except JSONDecodeError:  # body was not JSON; surface the raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncSearchYourDocsWithGptClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper  # shared async httpx client + auth headers
+
+    async def doc_search(
+        self,
+        *,
+        search_query: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT,
+        documents: typing.Optional[typing.Sequence[str]] = OMIT,
+        max_references: typing.Optional[int] = OMIT,
+        max_context_words: typing.Optional[int] = OMIT,
+        scroll_jump: typing.Optional[int] = OMIT,
+        doc_extract_url: typing.Optional[str] = OMIT,
+        embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT,
+        dense_weight: typing.Optional[float] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        query_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> DocSearchPageResponse:
+        """
+        Parameters
+        ----------
+        search_query : str
+            Query to run against the documents (inferred from the name; confirm with the API reference).
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery]
+
+        documents : typing.Optional[typing.Sequence[str]]
+
+        max_references : typing.Optional[int]
+
+        max_context_words : typing.Optional[int]
+
+        scroll_jump : typing.Optional[int]
+
+        doc_extract_url : typing.Optional[str]
+
+        embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel]
+
+        dense_weight : typing.Optional[float]
+            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+        task_instructions : typing.Optional[str]
+
+        query_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[DocSearchPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        citation_style : typing.Optional[DocSearchPageRequestCitationStyle]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DocSearchPageResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.search_your_docs_with_gpt.doc_search(
+                search_query="search_query",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v2/doc-search/",  # v2 endpoint: returns the completed recipe result directly
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "search_query": search_query,
+                "keyword_query": keyword_query,
+                "documents": documents,
+                "max_references": max_references,
+                "max_context_words": max_context_words,
+                "scroll_jump": scroll_jump,
+                "doc_extract_url": doc_extract_url,
+                "embedding_model": embedding_model,
+                "dense_weight": dense_weight,
+                "task_instructions": task_instructions,
+                "query_instructions": query_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "citation_style": citation_style,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DocSearchPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # unrecognized status: fall through to generic ApiError below
+        except JSONDecodeError:  # body was not JSON; surface the raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_doc_search(
+        self,
+        *,
+        search_query: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT,
+        documents: typing.Optional[typing.Sequence[str]] = OMIT,
+        max_references: typing.Optional[int] = OMIT,
+        max_context_words: typing.Optional[int] = OMIT,
+        scroll_jump: typing.Optional[int] = OMIT,
+        doc_extract_url: typing.Optional[str] = OMIT,
+        embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT,
+        dense_weight: typing.Optional[float] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        query_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        search_query : str
+            Query to run against the documents (inferred from the name; confirm with the API reference).
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery]
+
+        documents : typing.Optional[typing.Sequence[str]]
+
+        max_references : typing.Optional[int]
+
+        max_context_words : typing.Optional[int]
+
+        scroll_jump : typing.Optional[int]
+
+        doc_extract_url : typing.Optional[str]
+
+        embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel]
+
+        dense_weight : typing.Optional[float]
+            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+
+        task_instructions : typing.Optional[str]
+
+        query_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[DocSearchPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        citation_style : typing.Optional[DocSearchPageRequestCitationStyle]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.search_your_docs_with_gpt.async_doc_search(
+                search_query="search_query",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/doc-search/async/",  # queues the run; poll via `status_doc_search` for the result
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "search_query": search_query,
+                "keyword_query": keyword_query,
+                "documents": documents,
+                "max_references": max_references,
+                "max_context_words": max_context_words,
+                "scroll_jump": scroll_jump,
+                "doc_extract_url": doc_extract_url,
+                "embedding_model": embedding_model,
+                "dense_weight": dense_weight,
+                "task_instructions": task_instructions,
+                "query_instructions": query_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "citation_style": citation_style,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # (no 500 branch here, unlike doc_search — generated from the spec)
+        except JSONDecodeError:  # body was not JSON; surface the raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_doc_search(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> DocSearchPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+            ID of a previously queued run; sent as the ``run_id`` query parameter.
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DocSearchPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.search_your_docs_with_gpt.status_doc_search(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/doc-search/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DocSearchPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()  # unrecognized status: fall through to generic ApiError below
+        except JSONDecodeError:  # body was not JSON; surface the raw text instead
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/smart_gpt/__init__.py b/src/gooey/smart_gpt/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/smart_gpt/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/smart_gpt/client.py b/src/gooey/smart_gpt/client.py
new file mode 100644
index 0000000..bf32dc4
--- /dev/null
+++ b/src/gooey/smart_gpt/client.py
@@ -0,0 +1,587 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
+from ..types.smart_gpt_page_response import SmartGptPageResponse
+from ..types.smart_gpt_page_status_response import SmartGptPageStatusResponse
+
# Sentinel default for optional parameters: ``Ellipsis`` distinguishes
# "argument not supplied" from an explicit ``None``, letting the request
# layer (via ``omit=OMIT``) drop unsupplied fields from the JSON payload.
OMIT = typing.cast(typing.Any, ...)
+
+
class SmartGptClient:
    """Blocking client for the SmartGPT recipe endpoints (run, async submit, status poll)."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    @staticmethod
    def _request_body(
        input_prompt: str,
        functions: typing.Any,
        variables: typing.Any,
        cot_prompt: typing.Any,
        reflexion_prompt: typing.Any,
        dera_prompt: typing.Any,
        selected_model: typing.Any,
        avoid_repetition: typing.Any,
        num_outputs: typing.Any,
        quality: typing.Any,
        max_tokens: typing.Any,
        sampling_temperature: typing.Any,
        settings: typing.Any,
    ) -> typing.Dict[str, typing.Any]:
        # Shared JSON body for the sync (v2) and async (v3) run endpoints;
        # key order mirrors the API definition.
        return {
            "functions": functions,
            "variables": variables,
            "input_prompt": input_prompt,
            "cot_prompt": cot_prompt,
            "reflexion_prompt": reflexion_prompt,
            "dera_prompt": dera_prompt,
            "selected_model": selected_model,
            "avoid_repetition": avoid_repetition,
            "num_outputs": num_outputs,
            "quality": quality,
            "max_tokens": max_tokens,
            "sampling_temperature": sampling_temperature,
            "settings": settings,
        }

    @staticmethod
    def _resolve(_response: typing.Any, _success_type: typing.Any, *, _with_500: bool = False) -> typing.Any:
        # Parse a 2xx body into ``_success_type``; otherwise raise the typed
        # error matching the status code, or a generic ApiError as a fallback.
        # A body that is not valid JSON also falls back to ApiError (raw text).
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(_success_type, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _with_500 and _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _body = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_body)

    def post(
        self,
        *,
        input_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        cot_prompt: typing.Optional[str] = OMIT,
        reflexion_prompt: typing.Optional[str] = OMIT,
        dera_prompt: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> SmartGptPageResponse:
        """
        Run SmartGPT synchronously and block for the result.

        Parameters
        ----------
        input_prompt : str
            The prompt to run (required).
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        cot_prompt, reflexion_prompt, dera_prompt : typing.Optional[str]
        selected_model : typing.Optional[SmartGptPageRequestSelectedModel]
        avoid_repetition : typing.Optional[bool]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[float]
        max_tokens : typing.Optional[int]
        sampling_temperature : typing.Optional[float]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SmartGptPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.smart_gpt.post(
            input_prompt="input_prompt",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v2/SmartGPT/",
            method="POST",
            json=self._request_body(
                input_prompt,
                functions,
                variables,
                cot_prompt,
                reflexion_prompt,
                dera_prompt,
                selected_model,
                avoid_repetition,
                num_outputs,
                quality,
                max_tokens,
                sampling_temperature,
                settings,
            ),
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous run endpoint can additionally report a 500 failure.
        return self._resolve(_response, SmartGptPageResponse, _with_500=True)

    def async_smart_gpt(
        self,
        *,
        input_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        cot_prompt: typing.Optional[str] = OMIT,
        reflexion_prompt: typing.Optional[str] = OMIT,
        dera_prompt: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Submit a SmartGPT run for background execution; poll ``status_smart_gpt``
        with the returned run id for the result.

        Parameters
        ----------
        input_prompt : str
            The prompt to run (required).
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        cot_prompt, reflexion_prompt, dera_prompt : typing.Optional[str]
        selected_model : typing.Optional[SmartGptPageRequestSelectedModel]
        avoid_repetition : typing.Optional[bool]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[float]
        max_tokens : typing.Optional[int]
        sampling_temperature : typing.Optional[float]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.smart_gpt.async_smart_gpt(
            input_prompt="input_prompt",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v3/SmartGPT/async/",
            method="POST",
            json=self._request_body(
                input_prompt,
                functions,
                variables,
                cot_prompt,
                reflexion_prompt,
                dera_prompt,
                selected_model,
                avoid_repetition,
                num_outputs,
                quality,
                max_tokens,
                sampling_temperature,
                settings,
            ),
            request_options=request_options,
            omit=OMIT,
        )
        return self._resolve(_response, AsyncApiResponseModelV3)

    def status_smart_gpt(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> SmartGptPageStatusResponse:
        """
        Poll the status of a background SmartGPT run.

        Parameters
        ----------
        run_id : str
            Identifier returned by ``async_smart_gpt``.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SmartGptPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.smart_gpt.status_smart_gpt(
            run_id="run_id",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v3/SmartGPT/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._resolve(_response, SmartGptPageStatusResponse)
+
+
class AsyncSmartGptClient:
    """Asyncio client for the SmartGPT recipe endpoints (run, async submit, status poll)."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    @staticmethod
    def _request_body(
        input_prompt: str,
        functions: typing.Any,
        variables: typing.Any,
        cot_prompt: typing.Any,
        reflexion_prompt: typing.Any,
        dera_prompt: typing.Any,
        selected_model: typing.Any,
        avoid_repetition: typing.Any,
        num_outputs: typing.Any,
        quality: typing.Any,
        max_tokens: typing.Any,
        sampling_temperature: typing.Any,
        settings: typing.Any,
    ) -> typing.Dict[str, typing.Any]:
        # Shared JSON body for the sync (v2) and async (v3) run endpoints;
        # key order mirrors the API definition.
        return {
            "functions": functions,
            "variables": variables,
            "input_prompt": input_prompt,
            "cot_prompt": cot_prompt,
            "reflexion_prompt": reflexion_prompt,
            "dera_prompt": dera_prompt,
            "selected_model": selected_model,
            "avoid_repetition": avoid_repetition,
            "num_outputs": num_outputs,
            "quality": quality,
            "max_tokens": max_tokens,
            "sampling_temperature": sampling_temperature,
            "settings": settings,
        }

    @staticmethod
    def _resolve(_response: typing.Any, _success_type: typing.Any, *, _with_500: bool = False) -> typing.Any:
        # Parse a 2xx body into ``_success_type``; otherwise raise the typed
        # error matching the status code, or a generic ApiError as a fallback.
        # A body that is not valid JSON also falls back to ApiError (raw text).
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(_success_type, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _with_500 and _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _body = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_body)

    async def post(
        self,
        *,
        input_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        cot_prompt: typing.Optional[str] = OMIT,
        reflexion_prompt: typing.Optional[str] = OMIT,
        dera_prompt: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> SmartGptPageResponse:
        """
        Run SmartGPT and await the result.

        Parameters
        ----------
        input_prompt : str
            The prompt to run (required).
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        cot_prompt, reflexion_prompt, dera_prompt : typing.Optional[str]
        selected_model : typing.Optional[SmartGptPageRequestSelectedModel]
        avoid_repetition : typing.Optional[bool]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[float]
        max_tokens : typing.Optional[int]
        sampling_temperature : typing.Optional[float]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SmartGptPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.smart_gpt.post(
                input_prompt="input_prompt",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v2/SmartGPT/",
            method="POST",
            json=self._request_body(
                input_prompt,
                functions,
                variables,
                cot_prompt,
                reflexion_prompt,
                dera_prompt,
                selected_model,
                avoid_repetition,
                num_outputs,
                quality,
                max_tokens,
                sampling_temperature,
                settings,
            ),
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous run endpoint can additionally report a 500 failure.
        return self._resolve(_response, SmartGptPageResponse, _with_500=True)

    async def async_smart_gpt(
        self,
        *,
        input_prompt: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        cot_prompt: typing.Optional[str] = OMIT,
        reflexion_prompt: typing.Optional[str] = OMIT,
        dera_prompt: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Submit a SmartGPT run for background execution; poll ``status_smart_gpt``
        with the returned run id for the result.

        Parameters
        ----------
        input_prompt : str
            The prompt to run (required).
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        cot_prompt, reflexion_prompt, dera_prompt : typing.Optional[str]
        selected_model : typing.Optional[SmartGptPageRequestSelectedModel]
        avoid_repetition : typing.Optional[bool]
        num_outputs : typing.Optional[int]
        quality : typing.Optional[float]
        max_tokens : typing.Optional[int]
        sampling_temperature : typing.Optional[float]
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.smart_gpt.async_smart_gpt(
                input_prompt="input_prompt",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/SmartGPT/async/",
            method="POST",
            json=self._request_body(
                input_prompt,
                functions,
                variables,
                cot_prompt,
                reflexion_prompt,
                dera_prompt,
                selected_model,
                avoid_repetition,
                num_outputs,
                quality,
                max_tokens,
                sampling_temperature,
                settings,
            ),
            request_options=request_options,
            omit=OMIT,
        )
        return self._resolve(_response, AsyncApiResponseModelV3)

    async def status_smart_gpt(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> SmartGptPageStatusResponse:
        """
        Poll the status of a background SmartGPT run.

        Parameters
        ----------
        run_id : str
            Identifier returned by ``async_smart_gpt``.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SmartGptPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.smart_gpt.status_smart_gpt(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/SmartGPT/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._resolve(_response, SmartGptPageStatusResponse)
diff --git a/src/gooey/speech_recognition_translation/__init__.py b/src/gooey/speech_recognition_translation/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/speech_recognition_translation/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/speech_recognition_translation/client.py b/src/gooey/speech_recognition_translation/client.py
new file mode 100644
index 0000000..ef5c915
--- /dev/null
+++ b/src/gooey/speech_recognition_translation/client.py
@@ -0,0 +1,585 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.asr_page_request_output_format import AsrPageRequestOutputFormat
+from ..types.asr_page_request_selected_model import AsrPageRequestSelectedModel
+from ..types.asr_page_request_translation_model import AsrPageRequestTranslationModel
+from ..types.asr_page_response import AsrPageResponse
+from ..types.asr_page_status_response import AsrPageStatusResponse
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
# Sentinel default for optional parameters: ``Ellipsis`` distinguishes
# "argument not supplied" from an explicit ``None``, letting the request
# layer (via ``omit=OMIT``) drop unsupplied fields from the JSON payload.
OMIT = typing.cast(typing.Any, ...)
+
+
class SpeechRecognitionTranslationClient:
    """Blocking client for the Speech Recognition & Translation (ASR) recipe endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    @staticmethod
    def _request_body(
        documents: typing.Any,
        functions: typing.Any,
        variables: typing.Any,
        selected_model: typing.Any,
        language: typing.Any,
        translation_model: typing.Any,
        output_format: typing.Any,
        google_translate_target: typing.Any,
        translation_source: typing.Any,
        translation_target: typing.Any,
        glossary_document: typing.Any,
        settings: typing.Any,
    ) -> typing.Dict[str, typing.Any]:
        # Shared JSON body for the sync (v2) and async (v3) ASR endpoints;
        # key order mirrors the API definition.
        return {
            "functions": functions,
            "variables": variables,
            "documents": documents,
            "selected_model": selected_model,
            "language": language,
            "translation_model": translation_model,
            "output_format": output_format,
            "google_translate_target": google_translate_target,
            "translation_source": translation_source,
            "translation_target": translation_target,
            "glossary_document": glossary_document,
            "settings": settings,
        }

    @staticmethod
    def _resolve(_response: typing.Any, _success_type: typing.Any, *, _with_500: bool = False) -> typing.Any:
        # Parse a 2xx body into ``_success_type``; otherwise raise the typed
        # error matching the status code, or a generic ApiError as a fallback.
        # A body that is not valid JSON also falls back to ApiError (raw text).
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(_success_type, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _with_500 and _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _body = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_body)

    def asr(
        self,
        *,
        documents: typing.Sequence[str],
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT,
        language: typing.Optional[str] = OMIT,
        translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT,
        output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT,
        google_translate_target: typing.Optional[str] = OMIT,
        translation_source: typing.Optional[str] = OMIT,
        translation_target: typing.Optional[str] = OMIT,
        glossary_document: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsrPageResponse:
        """
        Run speech recognition (and optional translation) synchronously and block for the result.

        Parameters
        ----------
        documents : typing.Sequence[str]
            Audio/video document URLs to transcribe (required).
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        selected_model : typing.Optional[AsrPageRequestSelectedModel]
        language : typing.Optional[str]
        translation_model : typing.Optional[AsrPageRequestTranslationModel]
        output_format : typing.Optional[AsrPageRequestOutputFormat]
        google_translate_target : typing.Optional[str]
            Deprecated: use `translation_model` & `translation_target` instead.
        translation_source : typing.Optional[str]
        translation_target : typing.Optional[str]
        glossary_document : typing.Optional[str]
            Provide a glossary to customize translation and improve accuracy of domain-specific terms.
            If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsrPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.speech_recognition_translation.asr(
            documents=["documents"],
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v2/asr/",
            method="POST",
            json=self._request_body(
                documents,
                functions,
                variables,
                selected_model,
                language,
                translation_model,
                output_format,
                google_translate_target,
                translation_source,
                translation_target,
                glossary_document,
                settings,
            ),
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous run endpoint can additionally report a 500 failure.
        return self._resolve(_response, AsrPageResponse, _with_500=True)

    def async_asr(
        self,
        *,
        documents: typing.Sequence[str],
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT,
        language: typing.Optional[str] = OMIT,
        translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT,
        output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT,
        google_translate_target: typing.Optional[str] = OMIT,
        translation_source: typing.Optional[str] = OMIT,
        translation_target: typing.Optional[str] = OMIT,
        glossary_document: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Submit an ASR run for background execution; poll ``status_asr`` with the
        returned run id for the result.

        Parameters
        ----------
        documents : typing.Sequence[str]
            Audio/video document URLs to transcribe (required).
        functions : typing.Optional[typing.Sequence[RecipeFunction]]
        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments.
        selected_model : typing.Optional[AsrPageRequestSelectedModel]
        language : typing.Optional[str]
        translation_model : typing.Optional[AsrPageRequestTranslationModel]
        output_format : typing.Optional[AsrPageRequestOutputFormat]
        google_translate_target : typing.Optional[str]
            Deprecated: use `translation_model` & `translation_target` instead.
        translation_source : typing.Optional[str]
        translation_target : typing.Optional[str]
        glossary_document : typing.Optional[str]
            Provide a glossary to customize translation and improve accuracy of domain-specific terms.
            If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
        settings : typing.Optional[RunSettings]
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.speech_recognition_translation.async_asr(
            documents=["documents"],
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v3/asr/async/",
            method="POST",
            json=self._request_body(
                documents,
                functions,
                variables,
                selected_model,
                language,
                translation_model,
                output_format,
                google_translate_target,
                translation_source,
                translation_target,
                glossary_document,
                settings,
            ),
            request_options=request_options,
            omit=OMIT,
        )
        return self._resolve(_response, AsyncApiResponseModelV3)

    def status_asr(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> AsrPageStatusResponse:
        """
        Poll the status of a background ASR run.

        Parameters
        ----------
        run_id : str
            Identifier returned by ``async_asr``.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsrPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.speech_recognition_translation.status_asr(
            run_id="run_id",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v3/asr/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._resolve(_response, AsrPageStatusResponse)
+
+
+class AsyncSpeechRecognitionTranslationClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def asr(
+ self,
+ *,
+ documents: typing.Sequence[str],
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT,
+ language: typing.Optional[str] = OMIT,
+ translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT,
+ output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT,
+ google_translate_target: typing.Optional[str] = OMIT,
+ translation_source: typing.Optional[str] = OMIT,
+ translation_target: typing.Optional[str] = OMIT,
+ glossary_document: typing.Optional[str] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsrPageResponse:
+ """
+ Parameters
+ ----------
+ documents : typing.Sequence[str]
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[AsrPageRequestSelectedModel]
+
+ language : typing.Optional[str]
+
+ translation_model : typing.Optional[AsrPageRequestTranslationModel]
+
+ output_format : typing.Optional[AsrPageRequestOutputFormat]
+
+ google_translate_target : typing.Optional[str]
+ use `translation_model` & `translation_target` instead.
+
+ translation_source : typing.Optional[str]
+
+ translation_target : typing.Optional[str]
+
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsrPageResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.speech_recognition_translation.asr(
+ documents=["documents"],
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v2/asr/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "selected_model": selected_model,
+ "language": language,
+ "translation_model": translation_model,
+ "output_format": output_format,
+ "google_translate_target": google_translate_target,
+ "translation_source": translation_source,
+ "translation_target": translation_target,
+ "glossary_document": glossary_document,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsrPageResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ if _response.status_code == 500:
+ raise InternalServerError(
+ pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def async_asr(
+ self,
+ *,
+ documents: typing.Sequence[str],
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT,
+ language: typing.Optional[str] = OMIT,
+ translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT,
+ output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT,
+ google_translate_target: typing.Optional[str] = OMIT,
+ translation_source: typing.Optional[str] = OMIT,
+ translation_target: typing.Optional[str] = OMIT,
+ glossary_document: typing.Optional[str] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncApiResponseModelV3:
+ """
+ Parameters
+ ----------
+ documents : typing.Sequence[str]
+
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+ variables : typing.Optional[typing.Dict[str, typing.Any]]
+ Variables to be used as Jinja prompt templates and in functions as arguments
+
+ selected_model : typing.Optional[AsrPageRequestSelectedModel]
+
+ language : typing.Optional[str]
+
+ translation_model : typing.Optional[AsrPageRequestTranslationModel]
+
+ output_format : typing.Optional[AsrPageRequestOutputFormat]
+
+ google_translate_target : typing.Optional[str]
+ use `translation_model` & `translation_target` instead.
+
+ translation_source : typing.Optional[str]
+
+ translation_target : typing.Optional[str]
+
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+ settings : typing.Optional[RunSettings]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncApiResponseModelV3
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.speech_recognition_translation.async_asr(
+ documents=["documents"],
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/asr/async/",
+ method="POST",
+ json={
+ "functions": functions,
+ "variables": variables,
+ "documents": documents,
+ "selected_model": selected_model,
+ "language": language,
+ "translation_model": translation_model,
+ "output_format": output_format,
+ "google_translate_target": google_translate_target,
+ "translation_source": translation_source,
+ "translation_target": translation_target,
+ "glossary_document": glossary_document,
+ "settings": settings,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def status_asr(
+ self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsrPageStatusResponse:
+ """
+ Parameters
+ ----------
+ run_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsrPageStatusResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from gooey.client import AsyncGooey
+
+ client = AsyncGooey(
+ authorization="YOUR_AUTHORIZATION",
+ )
+
+
+ async def main() -> None:
+ await client.speech_recognition_translation.status_asr(
+ run_id="run_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v3/asr/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(AsrPageStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 402:
+ raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
+ )
+ if _response.status_code == 429:
+ raise TooManyRequestsError(
+ pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json()) # type: ignore
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/summarize_your_docs_with_gpt/__init__.py b/src/gooey/summarize_your_docs_with_gpt/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/summarize_your_docs_with_gpt/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/summarize_your_docs_with_gpt/client.py b/src/gooey/summarize_your_docs_with_gpt/client.py
new file mode 100644
index 0000000..b5e88bb
--- /dev/null
+++ b/src/gooey/summarize_your_docs_with_gpt/client.py
@@ -0,0 +1,620 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
+from ..types.doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel
+from ..types.doc_summary_page_response import DocSummaryPageResponse
+from ..types.doc_summary_page_status_response import DocSummaryPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
class SummarizeYourDocsWithGptClient:
    """Synchronous client for the doc-summary ("Summarize your docs with GPT") recipe."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _decode_or_raise(
        self,
        response: typing.Any,
        success_type: typing.Any,
        *,
        handle_server_error: bool = False,
    ) -> typing.Any:
        """
        Decode an HTTP response and either return the parsed success model or
        raise the API error mapped from the status code.

        A body that is not valid JSON is surfaced as a plain ApiError carrying
        the raw response text. When ``handle_server_error`` is true, a 500
        response is additionally mapped to InternalServerError.
        """
        code = response.status_code
        try:
            payload = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=code, body=response.text)
        if 200 <= code < 300:
            return pydantic_v1.parse_obj_as(success_type, payload)  # type: ignore
        if code == 402:
            raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, payload))  # type: ignore
        if code == 422:
            raise UnprocessableEntityError(pydantic_v1.parse_obj_as(HttpValidationError, payload))  # type: ignore
        if code == 429:
            raise TooManyRequestsError(pydantic_v1.parse_obj_as(GenericErrorResponse, payload))  # type: ignore
        if handle_server_error and code == 500:
            raise InternalServerError(pydantic_v1.parse_obj_as(FailedReponseModelV2, payload))  # type: ignore
        raise ApiError(status_code=code, body=payload)

    def doc_summary(
        self,
        *,
        documents: typing.Sequence[str],
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        merge_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT,
        selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT,
        google_translate_target: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> DocSummaryPageResponse:
        """
        Run the doc-summary recipe synchronously (POST ``v2/doc-summary/``).

        Parameters
        ----------
        documents : typing.Sequence[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        task_instructions : typing.Optional[str]

        merge_instructions : typing.Optional[str]

        selected_model : typing.Optional[DocSummaryPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        chain_type : typing.Optional[typing.Literal["map_reduce"]]

        selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel]

        google_translate_target : typing.Optional[str]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        DocSummaryPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.summarize_your_docs_with_gpt.doc_summary(
            documents=["documents"],
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v2/doc-summary/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "documents": documents,
                "task_instructions": task_instructions,
                "merge_instructions": merge_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "chain_type": chain_type,
                "selected_asr_model": selected_asr_model,
                "google_translate_target": google_translate_target,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous v2 endpoint documents a 500 -> FailedReponseModelV2 mapping.
        return self._decode_or_raise(response, DocSummaryPageResponse, handle_server_error=True)

    def async_doc_summary(
        self,
        *,
        documents: typing.Sequence[str],
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        merge_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT,
        selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT,
        google_translate_target: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Kick off a doc-summary run as an async job (POST ``v3/doc-summary/async/``)
        and return immediately; poll ``status_doc_summary`` for the result.

        Parameters
        ----------
        documents : typing.Sequence[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        task_instructions : typing.Optional[str]

        merge_instructions : typing.Optional[str]

        selected_model : typing.Optional[DocSummaryPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        chain_type : typing.Optional[typing.Literal["map_reduce"]]

        selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel]

        google_translate_target : typing.Optional[str]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.summarize_your_docs_with_gpt.async_doc_summary(
            documents=["documents"],
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v3/doc-summary/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "documents": documents,
                "task_instructions": task_instructions,
                "merge_instructions": merge_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "chain_type": chain_type,
                "selected_asr_model": selected_asr_model,
                "google_translate_target": google_translate_target,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return self._decode_or_raise(response, AsyncApiResponseModelV3)

    def status_doc_summary(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> DocSummaryPageStatusResponse:
        """
        Poll the status of an async doc-summary run (GET ``v3/doc-summary/status/``).

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        DocSummaryPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.summarize_your_docs_with_gpt.status_doc_summary(
            run_id="run_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            "v3/doc-summary/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._decode_or_raise(response, DocSummaryPageStatusResponse)
+
+
class AsyncSummarizeYourDocsWithGptClient:
    """Asynchronous client for the doc-summary ("Summarize your docs with GPT") recipe."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _decode_or_raise(
        self,
        response: typing.Any,
        success_type: typing.Any,
        *,
        handle_server_error: bool = False,
    ) -> typing.Any:
        """
        Decode an HTTP response and either return the parsed success model or
        raise the API error mapped from the status code.

        A body that is not valid JSON is surfaced as a plain ApiError carrying
        the raw response text. When ``handle_server_error`` is true, a 500
        response is additionally mapped to InternalServerError.
        """
        code = response.status_code
        try:
            payload = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=code, body=response.text)
        if 200 <= code < 300:
            return pydantic_v1.parse_obj_as(success_type, payload)  # type: ignore
        if code == 402:
            raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, payload))  # type: ignore
        if code == 422:
            raise UnprocessableEntityError(pydantic_v1.parse_obj_as(HttpValidationError, payload))  # type: ignore
        if code == 429:
            raise TooManyRequestsError(pydantic_v1.parse_obj_as(GenericErrorResponse, payload))  # type: ignore
        if handle_server_error and code == 500:
            raise InternalServerError(pydantic_v1.parse_obj_as(FailedReponseModelV2, payload))  # type: ignore
        raise ApiError(status_code=code, body=payload)

    async def doc_summary(
        self,
        *,
        documents: typing.Sequence[str],
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        merge_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT,
        selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT,
        google_translate_target: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> DocSummaryPageResponse:
        """
        Run the doc-summary recipe synchronously (POST ``v2/doc-summary/``).

        Parameters
        ----------
        documents : typing.Sequence[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        task_instructions : typing.Optional[str]

        merge_instructions : typing.Optional[str]

        selected_model : typing.Optional[DocSummaryPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        chain_type : typing.Optional[typing.Literal["map_reduce"]]

        selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel]

        google_translate_target : typing.Optional[str]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        DocSummaryPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.summarize_your_docs_with_gpt.doc_summary(
                documents=["documents"],
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v2/doc-summary/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "documents": documents,
                "task_instructions": task_instructions,
                "merge_instructions": merge_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "chain_type": chain_type,
                "selected_asr_model": selected_asr_model,
                "google_translate_target": google_translate_target,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # The synchronous v2 endpoint documents a 500 -> FailedReponseModelV2 mapping.
        return self._decode_or_raise(response, DocSummaryPageResponse, handle_server_error=True)

    async def async_doc_summary(
        self,
        *,
        documents: typing.Sequence[str],
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        merge_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT,
        selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT,
        google_translate_target: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Kick off a doc-summary run as an async job (POST ``v3/doc-summary/async/``)
        and return immediately; poll ``status_doc_summary`` for the result.

        Parameters
        ----------
        documents : typing.Sequence[str]

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        task_instructions : typing.Optional[str]

        merge_instructions : typing.Optional[str]

        selected_model : typing.Optional[DocSummaryPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        chain_type : typing.Optional[typing.Literal["map_reduce"]]

        selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel]

        google_translate_target : typing.Optional[str]

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.summarize_your_docs_with_gpt.async_doc_summary(
                documents=["documents"],
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v3/doc-summary/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "documents": documents,
                "task_instructions": task_instructions,
                "merge_instructions": merge_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "chain_type": chain_type,
                "selected_asr_model": selected_asr_model,
                "google_translate_target": google_translate_target,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return self._decode_or_raise(response, AsyncApiResponseModelV3)

    async def status_doc_summary(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> DocSummaryPageStatusResponse:
        """
        Poll the status of an async doc-summary run (GET ``v3/doc-summary/status/``).

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        DocSummaryPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.summarize_your_docs_with_gpt.status_doc_summary(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v3/doc-summary/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        return self._decode_or_raise(response, DocSummaryPageStatusResponse)
diff --git a/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py b/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py b/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py
new file mode 100644
index 0000000..4472793
--- /dev/null
+++ b/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py
@@ -0,0 +1,628 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
+from ..types.doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel
+from ..types.doc_extract_page_response import DocExtractPageResponse
+from ..types.doc_extract_page_status_response import DocExtractPageStatusResponse
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+
+# Sentinel default for optional request parameters. Fields left as OMIT are
+# passed to the client wrapper via `omit=OMIT`, which presumably strips them
+# from the serialized JSON body so the server sees no value at all — confirm
+# against core.client_wrapper.
+OMIT = typing.cast(typing.Any, ...)
+
+
+class SyntheticDataMakerForVideosPdFsClient:
+    """Synchronous client for the "Synthetic Data Maker for Videos & PDFs" recipe.
+
+    Wraps the Gooey ``doc-extract`` endpoints: a blocking run
+    (``doc_extract``), a queued run (``async_doc_extract``) and a status poll
+    (``status_doc_extract``). All HTTP calls are issued through the shared
+    ``SyncClientWrapper`` supplied at construction time.
+    """
+
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def doc_extract(
+        self,
+        *,
+        documents: typing.Sequence[str],
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        sheet_url: typing.Optional[str] = OMIT,
+        selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT,
+        google_translate_target: typing.Optional[str] = OMIT,
+        glossary_document: typing.Optional[str] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> DocExtractPageResponse:
+        """
+        Run document extraction synchronously (``POST v2/doc-extract/``) and
+        return the completed run's response.
+
+        Parameters
+        ----------
+        documents : typing.Sequence[str]
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        sheet_url : typing.Optional[str]
+
+        selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel]
+
+        google_translate_target : typing.Optional[str]
+
+        glossary_document : typing.Optional[str]
+            Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+            If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+        task_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[DocExtractPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DocExtractPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.synthetic_data_maker_for_videos_pd_fs.doc_extract(
+            documents=["documents"],
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/doc-extract/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "documents": documents,
+                "sheet_url": sheet_url,
+                "selected_asr_model": selected_asr_model,
+                "google_translate_target": google_translate_target,
+                "glossary_document": glossary_document,
+                "task_instructions": task_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses to typed exceptions; any other non-2xx
+        # status (or a body that is not valid JSON) surfaces as a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DocExtractPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_doc_extract(
+        self,
+        *,
+        documents: typing.Sequence[str],
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        sheet_url: typing.Optional[str] = OMIT,
+        selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT,
+        google_translate_target: typing.Optional[str] = OMIT,
+        glossary_document: typing.Optional[str] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Queue a document-extraction run without waiting for completion
+        (``POST v3/doc-extract/async/``). The returned ``AsyncApiResponseModelV3``
+        presumably carries the run id used by ``status_doc_extract`` — confirm.
+
+        Parameters
+        ----------
+        documents : typing.Sequence[str]
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        sheet_url : typing.Optional[str]
+
+        selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel]
+
+        google_translate_target : typing.Optional[str]
+
+        glossary_document : typing.Optional[str]
+            Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+            If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+        task_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[DocExtractPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(
+            documents=["documents"],
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/doc-extract/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "documents": documents,
+                "sheet_url": sheet_url,
+                "selected_asr_model": selected_asr_model,
+                "google_translate_target": google_translate_target,
+                "glossary_document": glossary_document,
+                "task_instructions": task_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def status_doc_extract(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> DocExtractPageStatusResponse:
+        """
+        Poll the status of a queued document-extraction run
+        (``GET v3/doc-extract/status/?run_id=...``).
+
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DocExtractPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(
+            run_id="run_id",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/doc-extract/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DocExtractPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncSyntheticDataMakerForVideosPdFsClient:
+    """Asyncio client for the "Synthetic Data Maker for Videos & PDFs" recipe.
+
+    Awaitable counterpart of ``SyntheticDataMakerForVideosPdFsClient``: same
+    three ``doc-extract`` endpoints, issued through the shared
+    ``AsyncClientWrapper``.
+    """
+
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def doc_extract(
+        self,
+        *,
+        documents: typing.Sequence[str],
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        sheet_url: typing.Optional[str] = OMIT,
+        selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT,
+        google_translate_target: typing.Optional[str] = OMIT,
+        glossary_document: typing.Optional[str] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> DocExtractPageResponse:
+        """
+        Run document extraction (``POST v2/doc-extract/``) and await the
+        completed run's response.
+
+        Parameters
+        ----------
+        documents : typing.Sequence[str]
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        sheet_url : typing.Optional[str]
+
+        selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel]
+
+        google_translate_target : typing.Optional[str]
+
+        glossary_document : typing.Optional[str]
+            Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+            If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+        task_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[DocExtractPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DocExtractPageResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.synthetic_data_maker_for_videos_pd_fs.doc_extract(
+                documents=["documents"],
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v2/doc-extract/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "documents": documents,
+                "sheet_url": sheet_url,
+                "selected_asr_model": selected_asr_model,
+                "google_translate_target": google_translate_target,
+                "glossary_document": glossary_document,
+                "task_instructions": task_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses to typed exceptions; any other non-2xx
+        # status (or a body that is not valid JSON) surfaces as a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DocExtractPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_doc_extract(
+        self,
+        *,
+        documents: typing.Sequence[str],
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        sheet_url: typing.Optional[str] = OMIT,
+        selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT,
+        google_translate_target: typing.Optional[str] = OMIT,
+        glossary_document: typing.Optional[str] = OMIT,
+        task_instructions: typing.Optional[str] = OMIT,
+        selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT,
+        avoid_repetition: typing.Optional[bool] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[float] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        sampling_temperature: typing.Optional[float] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Queue a document-extraction run without waiting for completion
+        (``POST v3/doc-extract/async/``). The returned ``AsyncApiResponseModelV3``
+        presumably carries the run id used by ``status_doc_extract`` — confirm.
+
+        Parameters
+        ----------
+        documents : typing.Sequence[str]
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        sheet_url : typing.Optional[str]
+
+        selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel]
+
+        google_translate_target : typing.Optional[str]
+
+        glossary_document : typing.Optional[str]
+            Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+            If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+
+        task_instructions : typing.Optional[str]
+
+        selected_model : typing.Optional[DocExtractPageRequestSelectedModel]
+
+        avoid_repetition : typing.Optional[bool]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[float]
+
+        max_tokens : typing.Optional[int]
+
+        sampling_temperature : typing.Optional[float]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.synthetic_data_maker_for_videos_pd_fs.async_doc_extract(
+                documents=["documents"],
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/doc-extract/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "documents": documents,
+                "sheet_url": sheet_url,
+                "selected_asr_model": selected_asr_model,
+                "google_translate_target": google_translate_target,
+                "glossary_document": glossary_document,
+                "task_instructions": task_instructions,
+                "selected_model": selected_model,
+                "avoid_repetition": avoid_repetition,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "max_tokens": max_tokens,
+                "sampling_temperature": sampling_temperature,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_doc_extract(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> DocExtractPageStatusResponse:
+        """
+        Poll the status of a queued document-extraction run
+        (``GET v3/doc-extract/status/?run_id=...``).
+
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DocExtractPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/doc-extract/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(DocExtractPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/text_guided_audio_generator/__init__.py b/src/gooey/text_guided_audio_generator/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/text_guided_audio_generator/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/text_guided_audio_generator/client.py b/src/gooey/text_guided_audio_generator/client.py
new file mode 100644
index 0000000..8f0ad5d
--- /dev/null
+++ b/src/gooey/text_guided_audio_generator/client.py
@@ -0,0 +1,570 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.text2audio_page_response import Text2AudioPageResponse
+from ..types.text2audio_page_status_response import Text2AudioPageStatusResponse
+
+# Sentinel default for optional request parameters. Fields left as OMIT are
+# passed to the client wrapper via `omit=OMIT`, which presumably strips them
+# from the serialized JSON body so the server sees no value at all — confirm
+# against core.client_wrapper.
+OMIT = typing.cast(typing.Any, ...)
+
+
+class TextGuidedAudioGeneratorClient:
+    """Synchronous client for the "Text Guided Audio Generator" recipe.
+
+    Wraps the Gooey ``text2audio`` endpoints: a blocking run (``text2audio``),
+    a queued run (``async_text2audio``) and a status poll
+    (``status_text2audio``). All HTTP calls are issued through the shared
+    ``SyncClientWrapper`` supplied at construction time.
+    """
+
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def text2audio(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        duration_sec: typing.Optional[float] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        sd2upscaling: typing.Optional[bool] = OMIT,
+        selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> Text2AudioPageResponse:
+        """
+        Generate audio from a text prompt synchronously
+        (``POST v2/text2audio/``) and return the completed run's response.
+
+        Parameters
+        ----------
+        text_prompt : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        negative_prompt : typing.Optional[str]
+
+        duration_sec : typing.Optional[float]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        seed : typing.Optional[int]
+
+        sd2upscaling : typing.Optional[bool]
+            Sent as ``sd_2_upscaling`` in the request body.
+
+        selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        Text2AudioPageResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.text_guided_audio_generator.text2audio(
+            text_prompt="text_prompt",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v2/text2audio/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "text_prompt": text_prompt,
+                "negative_prompt": negative_prompt,
+                "duration_sec": duration_sec,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "guidance_scale": guidance_scale,
+                "seed": seed,
+                "sd_2_upscaling": sd2upscaling,
+                "selected_models": selected_models,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Map documented error statuses to typed exceptions; any other non-2xx
+        # status (or a body that is not valid JSON) surfaces as a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(Text2AudioPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def async_text2audio(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        duration_sec: typing.Optional[float] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        sd2upscaling: typing.Optional[bool] = OMIT,
+        selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Queue a text-to-audio run without waiting for completion
+        (``POST v3/text2audio/async/``). The returned ``AsyncApiResponseModelV3``
+        presumably carries the run id used by ``status_text2audio`` — confirm.
+
+        Parameters
+        ----------
+        text_prompt : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        negative_prompt : typing.Optional[str]
+
+        duration_sec : typing.Optional[float]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        seed : typing.Optional[int]
+
+        sd2upscaling : typing.Optional[bool]
+            Sent as ``sd_2_upscaling`` in the request body.
+
+        selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.text_guided_audio_generator.async_text2audio(
+            text_prompt="text_prompt",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/text2audio/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "text_prompt": text_prompt,
+                "negative_prompt": negative_prompt,
+                "duration_sec": duration_sec,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "guidance_scale": guidance_scale,
+                "seed": seed,
+                "sd_2_upscaling": sd2upscaling,
+                "selected_models": selected_models,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def status_text2audio(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> Text2AudioPageStatusResponse:
+        """
+        Poll the status of a queued text-to-audio run
+        (``GET v3/text2audio/status/?run_id=...``).
+
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        Text2AudioPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        from gooey.client import Gooey
+
+        client = Gooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+        client.text_guided_audio_generator.status_text2audio(
+            run_id="run_id",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v3/text2audio/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(Text2AudioPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncTextGuidedAudioGeneratorClient:
+    """Async client for the Text Guided Audio Generator (text2audio) API.
+
+    Wraps three endpoints: the blocking run (POST ``v2/text2audio/``), the
+    fire-and-forget async run (POST ``v3/text2audio/async/``), and status
+    polling for async runs (GET ``v3/text2audio/status/``).
+    """
+
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        # Shared wrapper that supplies the async httpx client used by every method below.
+        self._client_wrapper = client_wrapper
+
+    async def text2audio(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        duration_sec: typing.Optional[float] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        sd2upscaling: typing.Optional[bool] = OMIT,
+        selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> Text2AudioPageResponse:
+        """
+        Parameters
+        ----------
+        text_prompt : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        negative_prompt : typing.Optional[str]
+
+        duration_sec : typing.Optional[float]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        seed : typing.Optional[int]
+
+        sd2upscaling : typing.Optional[bool]
+
+        selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        Text2AudioPageResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.text_guided_audio_generator.text2audio(
+                text_prompt="text_prompt",
+            )
+
+
+        asyncio.run(main())
+        """
+        # Optional args default to the OMIT sentinel; passing omit=OMIT lets the
+        # client wrapper tell "not provided" apart from an explicit None.
+        # NOTE: the python param `sd2upscaling` maps to wire key "sd_2_upscaling".
+        _response = await self._client_wrapper.httpx_client.request(
+            "v2/text2audio/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "text_prompt": text_prompt,
+                "negative_prompt": negative_prompt,
+                "duration_sec": duration_sec,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "guidance_scale": guidance_scale,
+                "seed": seed,
+                "sd_2_upscaling": sd2upscaling,
+                "selected_models": selected_models,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # 2xx parses into the typed response model; known error statuses raise
+        # typed exceptions; anything else (or a non-JSON body) becomes ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(Text2AudioPageResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
+                )
+            # Unrecognized status: decode the body for the generic ApiError below.
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Body was not valid JSON; surface the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def async_text2audio(
+        self,
+        *,
+        text_prompt: str,
+        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        negative_prompt: typing.Optional[str] = OMIT,
+        duration_sec: typing.Optional[float] = OMIT,
+        num_outputs: typing.Optional[int] = OMIT,
+        quality: typing.Optional[int] = OMIT,
+        guidance_scale: typing.Optional[float] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        sd2upscaling: typing.Optional[bool] = OMIT,
+        selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT,
+        settings: typing.Optional[RunSettings] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncApiResponseModelV3:
+        """
+        Parameters
+        ----------
+        text_prompt : str
+
+        functions : typing.Optional[typing.Sequence[RecipeFunction]]
+
+        variables : typing.Optional[typing.Dict[str, typing.Any]]
+            Variables to be used as Jinja prompt templates and in functions as arguments
+
+        negative_prompt : typing.Optional[str]
+
+        duration_sec : typing.Optional[float]
+
+        num_outputs : typing.Optional[int]
+
+        quality : typing.Optional[int]
+
+        guidance_scale : typing.Optional[float]
+
+        seed : typing.Optional[int]
+
+        sd2upscaling : typing.Optional[bool]
+
+        selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]
+
+        settings : typing.Optional[RunSettings]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncApiResponseModelV3
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.text_guided_audio_generator.async_text2audio(
+                text_prompt="text_prompt",
+            )
+
+
+        asyncio.run(main())
+        """
+        # Same body as text2audio, but submitted to the v3 async endpoint; the
+        # returned model carries the run handle polled via status_text2audio.
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/text2audio/async/",
+            method="POST",
+            json={
+                "functions": functions,
+                "variables": variables,
+                "text_prompt": text_prompt,
+                "negative_prompt": negative_prompt,
+                "duration_sec": duration_sec,
+                "num_outputs": num_outputs,
+                "quality": quality,
+                "guidance_scale": guidance_scale,
+                "seed": seed,
+                "sd_2_upscaling": sd2upscaling,
+                "selected_models": selected_models,
+                "settings": settings,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        # Known error statuses raise typed exceptions; anything else (or a
+        # non-JSON body) becomes a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            # Unrecognized status: decode the body for the generic ApiError below.
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Body was not valid JSON; surface the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def status_text2audio(
+        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> Text2AudioPageStatusResponse:
+        """
+        Parameters
+        ----------
+        run_id : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        Text2AudioPageStatusResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from gooey.client import AsyncGooey
+
+        client = AsyncGooey(
+            authorization="YOUR_AUTHORIZATION",
+        )
+
+
+        async def main() -> None:
+            await client.text_guided_audio_generator.status_text2audio(
+                run_id="run_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        # Poll the status of a run previously submitted via async_text2audio.
+        _response = await self._client_wrapper.httpx_client.request(
+            "v3/text2audio/status/", method="GET", params={"run_id": run_id}, request_options=request_options
+        )
+        # Known error statuses raise typed exceptions; anything else (or a
+        # non-JSON body) becomes a generic ApiError.
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(Text2AudioPageStatusResponse, _response.json())  # type: ignore
+            if _response.status_code == 402:
+                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+                )
+            if _response.status_code == 429:
+                raise TooManyRequestsError(
+                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
+                )
+            # Unrecognized status: decode the body for the generic ApiError below.
+            _response_json = _response.json()
+        except JSONDecodeError:
+            # Body was not valid JSON; surface the raw text instead.
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py
new file mode 100644
index 0000000..2693f1e
--- /dev/null
+++ b/src/gooey/types/__init__.py
@@ -0,0 +1,515 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agg_function import AggFunction
+from .agg_function_result import AggFunctionResult
+from .animation_prompt import AnimationPrompt
+from .asr_chunk import AsrChunk
+from .asr_output_json import AsrOutputJson
+from .asr_page_output import AsrPageOutput
+from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem
+from .asr_page_request import AsrPageRequest
+from .asr_page_request_output_format import AsrPageRequestOutputFormat
+from .asr_page_request_selected_model import AsrPageRequestSelectedModel
+from .asr_page_request_translation_model import AsrPageRequestTranslationModel
+from .asr_page_response import AsrPageResponse
+from .asr_page_status_response import AsrPageStatusResponse
+from .async_api_response_model_v3 import AsyncApiResponseModelV3
+from .balance_response import BalanceResponse
+from .bot_broadcast_filters import BotBroadcastFilters
+from .bulk_eval_page_output import BulkEvalPageOutput
+from .bulk_eval_page_request import BulkEvalPageRequest
+from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel
+from .bulk_eval_page_response import BulkEvalPageResponse
+from .bulk_eval_page_status_response import BulkEvalPageStatusResponse
+from .bulk_runner_page_output import BulkRunnerPageOutput
+from .bulk_runner_page_request import BulkRunnerPageRequest
+from .bulk_runner_page_response import BulkRunnerPageResponse
+from .bulk_runner_page_status_response import BulkRunnerPageStatusResponse
+from .button_pressed import ButtonPressed
+from .called_function_response import CalledFunctionResponse
+from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+from .chyron_plant_page_output import ChyronPlantPageOutput
+from .chyron_plant_page_request import ChyronPlantPageRequest
+from .chyron_plant_page_response import ChyronPlantPageResponse
+from .chyron_plant_page_status_response import ChyronPlantPageStatusResponse
+from .compare_llm_page_output import CompareLlmPageOutput
+from .compare_llm_page_request import CompareLlmPageRequest
+from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
+from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem
+from .compare_llm_page_response import CompareLlmPageResponse
+from .compare_llm_page_status_response import CompareLlmPageStatusResponse
+from .compare_text2img_page_output import CompareText2ImgPageOutput
+from .compare_text2img_page_request import CompareText2ImgPageRequest
+from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
+from .compare_text2img_page_response import CompareText2ImgPageResponse
+from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse
+from .compare_upscaler_page_output import CompareUpscalerPageOutput
+from .compare_upscaler_page_request import CompareUpscalerPageRequest
+from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
+from .compare_upscaler_page_response import CompareUpscalerPageResponse
+from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse
+from .console_logs import ConsoleLogs
+from .content import Content
+from .conversation_entry import ConversationEntry
+from .conversation_entry_content_item import (
+ ConversationEntryContentItem,
+ ConversationEntryContentItem_ImageUrl,
+ ConversationEntryContentItem_Text,
+)
+from .conversation_start import ConversationStart
+from .create_stream_response import CreateStreamResponse
+from .deforum_sd_page_output import DeforumSdPageOutput
+from .deforum_sd_page_request import DeforumSdPageRequest
+from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
+from .deforum_sd_page_response import DeforumSdPageResponse
+from .deforum_sd_page_status_response import DeforumSdPageStatusResponse
+from .detail import Detail
+from .doc_extract_page_output import DocExtractPageOutput
+from .doc_extract_page_request import DocExtractPageRequest
+from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
+from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel
+from .doc_extract_page_response import DocExtractPageResponse
+from .doc_extract_page_status_response import DocExtractPageStatusResponse
+from .doc_search_page_output import DocSearchPageOutput
+from .doc_search_page_request import DocSearchPageRequest
+from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
+from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel
+from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
+from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel
+from .doc_search_page_response import DocSearchPageResponse
+from .doc_search_page_status_response import DocSearchPageStatusResponse
+from .doc_summary_page_output import DocSummaryPageOutput
+from .doc_summary_page_request import DocSummaryPageRequest
+from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
+from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel
+from .doc_summary_page_response import DocSummaryPageResponse
+from .doc_summary_page_status_response import DocSummaryPageStatusResponse
+from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
+from .email_face_inpainting_page_request import EmailFaceInpaintingPageRequest
+from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
+from .email_face_inpainting_page_response import EmailFaceInpaintingPageResponse
+from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse
+from .embeddings_page_output import EmbeddingsPageOutput
+from .embeddings_page_request import EmbeddingsPageRequest
+from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
+from .embeddings_page_response import EmbeddingsPageResponse
+from .embeddings_page_status_response import EmbeddingsPageStatusResponse
+from .eval_prompt import EvalPrompt
+from .face_inpainting_page_output import FaceInpaintingPageOutput
+from .face_inpainting_page_request import FaceInpaintingPageRequest
+from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
+from .face_inpainting_page_response import FaceInpaintingPageResponse
+from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse
+from .failed_reponse_model_v2 import FailedReponseModelV2
+from .failed_response_detail import FailedResponseDetail
+from .final_response import FinalResponse
+from .function import Function
+from .functions_page_output import FunctionsPageOutput
+from .functions_page_request import FunctionsPageRequest
+from .functions_page_response import FunctionsPageResponse
+from .functions_page_status_response import FunctionsPageStatusResponse
+from .generic_error_response import GenericErrorResponse
+from .generic_error_response_detail import GenericErrorResponseDetail
+from .google_gpt_page_output import GoogleGptPageOutput
+from .google_gpt_page_request import GoogleGptPageRequest
+from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel
+from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel
+from .google_gpt_page_response import GoogleGptPageResponse
+from .google_gpt_page_status_response import GoogleGptPageStatusResponse
+from .google_image_gen_page_output import GoogleImageGenPageOutput
+from .google_image_gen_page_request import GoogleImageGenPageRequest
+from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
+from .google_image_gen_page_response import GoogleImageGenPageResponse
+from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse
+from .http_validation_error import HttpValidationError
+from .image_segmentation_page_output import ImageSegmentationPageOutput
+from .image_segmentation_page_request import ImageSegmentationPageRequest
+from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
+from .image_segmentation_page_response import ImageSegmentationPageResponse
+from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse
+from .image_url import ImageUrl
+from .img2img_page_output import Img2ImgPageOutput
+from .img2img_page_request import Img2ImgPageRequest
+from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
+from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem
+from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
+from .img2img_page_response import Img2ImgPageResponse
+from .img2img_page_status_response import Img2ImgPageStatusResponse
+from .letter_writer_page_output import LetterWriterPageOutput
+from .letter_writer_page_request import LetterWriterPageRequest
+from .letter_writer_page_response import LetterWriterPageResponse
+from .letter_writer_page_status_response import LetterWriterPageStatusResponse
+from .level import Level
+from .lipsync_page_output import LipsyncPageOutput
+from .lipsync_page_request import LipsyncPageRequest
+from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
+from .lipsync_page_response import LipsyncPageResponse
+from .lipsync_page_status_response import LipsyncPageStatusResponse
+from .lipsync_tts_page_output import LipsyncTtsPageOutput
+from .lipsync_tts_page_request import LipsyncTtsPageRequest
+from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
+from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
+from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
+from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
+from .lipsync_tts_page_response import LipsyncTtsPageResponse
+from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse
+from .llm_tools import LlmTools
+from .message_part import MessagePart
+from .object_inpainting_page_output import ObjectInpaintingPageOutput
+from .object_inpainting_page_request import ObjectInpaintingPageRequest
+from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
+from .object_inpainting_page_response import ObjectInpaintingPageResponse
+from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse
+from .preprocess import Preprocess
+from .prompt import Prompt
+from .prompt_tree_node import PromptTreeNode
+from .qr_code_generator_page_output import QrCodeGeneratorPageOutput
+from .qr_code_generator_page_request import QrCodeGeneratorPageRequest
+from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
+ QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
+)
+from .qr_code_generator_page_request_selected_controlnet_model_item import (
+ QrCodeGeneratorPageRequestSelectedControlnetModelItem,
+)
+from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
+from .qr_code_generator_page_response import QrCodeGeneratorPageResponse
+from .qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse
+from .recipe_function import RecipeFunction
+from .recipe_run_state import RecipeRunState
+from .related_doc_search_response import RelatedDocSearchResponse
+from .related_google_gpt_response import RelatedGoogleGptResponse
+from .related_qn_a_doc_page_output import RelatedQnADocPageOutput
+from .related_qn_a_doc_page_request import RelatedQnADocPageRequest
+from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
+from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel
+from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
+from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel
+from .related_qn_a_doc_page_response import RelatedQnADocPageResponse
+from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse
+from .related_qn_a_page_output import RelatedQnAPageOutput
+from .related_qn_a_page_request import RelatedQnAPageRequest
+from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
+from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel
+from .related_qn_a_page_response import RelatedQnAPageResponse
+from .related_qn_a_page_status_response import RelatedQnAPageStatusResponse
+from .reply_button import ReplyButton
+from .response_model import ResponseModel
+from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery
+from .response_model_final_prompt import ResponseModelFinalPrompt
+from .role import Role
+from .run_settings import RunSettings
+from .run_settings_retention_policy import RunSettingsRetentionPolicy
+from .run_start import RunStart
+from .sad_talker_settings import SadTalkerSettings
+from .scheduler import Scheduler
+from .search_reference import SearchReference
+from .seo_summary_page_output import SeoSummaryPageOutput
+from .seo_summary_page_request import SeoSummaryPageRequest
+from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel
+from .seo_summary_page_response import SeoSummaryPageResponse
+from .seo_summary_page_status_response import SeoSummaryPageStatusResponse
+from .serp_search_location import SerpSearchLocation
+from .serp_search_type import SerpSearchType
+from .smart_gpt_page_output import SmartGptPageOutput
+from .smart_gpt_page_request import SmartGptPageRequest
+from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
+from .smart_gpt_page_response import SmartGptPageResponse
+from .smart_gpt_page_status_response import SmartGptPageStatusResponse
+from .social_lookup_email_page_output import SocialLookupEmailPageOutput
+from .social_lookup_email_page_request import SocialLookupEmailPageRequest
+from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel
+from .social_lookup_email_page_response import SocialLookupEmailPageResponse
+from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse
+from .stream_error import StreamError
+from .text2audio_page_output import Text2AudioPageOutput
+from .text2audio_page_request import Text2AudioPageRequest
+from .text2audio_page_response import Text2AudioPageResponse
+from .text2audio_page_status_response import Text2AudioPageStatusResponse
+from .text_to_speech_page_output import TextToSpeechPageOutput
+from .text_to_speech_page_request import TextToSpeechPageRequest
+from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
+from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
+from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
+from .text_to_speech_page_response import TextToSpeechPageResponse
+from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse
+from .training_data_model import TrainingDataModel
+from .translation_page_output import TranslationPageOutput
+from .translation_page_request import TranslationPageRequest
+from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
+from .translation_page_response import TranslationPageResponse
+from .translation_page_status_response import TranslationPageStatusResponse
+from .trigger import Trigger
+from .validation_error import ValidationError
+from .validation_error_loc_item import ValidationErrorLocItem
+from .vcard import Vcard
+from .video_bots_page_output import VideoBotsPageOutput
+from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery
+from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt
+from .video_bots_page_request import VideoBotsPageRequest
+from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
+from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
+from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
+from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
+from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
+from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel
+from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
+from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
+from .video_bots_page_response import VideoBotsPageResponse
+from .video_bots_page_status_response import VideoBotsPageStatusResponse
+
+# Public re-export surface of gooey.types; one entry per name imported above.
+__all__ = [
+    "AggFunction",
+    "AggFunctionResult",
+    "AnimationPrompt",
+    "AsrChunk",
+    "AsrOutputJson",
+    "AsrPageOutput",
+    "AsrPageOutputOutputTextItem",
+    "AsrPageRequest",
+    "AsrPageRequestOutputFormat",
+    "AsrPageRequestSelectedModel",
+    "AsrPageRequestTranslationModel",
+    "AsrPageResponse",
+    "AsrPageStatusResponse",
+    "AsyncApiResponseModelV3",
+    "BalanceResponse",
+    "BotBroadcastFilters",
+    "BulkEvalPageOutput",
+    "BulkEvalPageRequest",
+    "BulkEvalPageRequestSelectedModel",
+    "BulkEvalPageResponse",
+    "BulkEvalPageStatusResponse",
+    "BulkRunnerPageOutput",
+    "BulkRunnerPageRequest",
+    "BulkRunnerPageResponse",
+    "BulkRunnerPageStatusResponse",
+    "ButtonPressed",
+    "CalledFunctionResponse",
+    "ChatCompletionContentPartImageParam",
+    "ChatCompletionContentPartTextParam",
+    "ChyronPlantPageOutput",
+    "ChyronPlantPageRequest",
+    "ChyronPlantPageResponse",
+    "ChyronPlantPageStatusResponse",
+    "CompareLlmPageOutput",
+    "CompareLlmPageRequest",
+    "CompareLlmPageRequestResponseFormatType",
+    "CompareLlmPageRequestSelectedModelsItem",
+    "CompareLlmPageResponse",
+    "CompareLlmPageStatusResponse",
+    "CompareText2ImgPageOutput",
+    "CompareText2ImgPageRequest",
+    "CompareText2ImgPageRequestSelectedModelsItem",
+    "CompareText2ImgPageResponse",
+    "CompareText2ImgPageStatusResponse",
+    "CompareUpscalerPageOutput",
+    "CompareUpscalerPageRequest",
+    "CompareUpscalerPageRequestSelectedModelsItem",
+    "CompareUpscalerPageResponse",
+    "CompareUpscalerPageStatusResponse",
+    "ConsoleLogs",
+    "Content",
+    "ConversationEntry",
+    "ConversationEntryContentItem",
+    "ConversationEntryContentItem_ImageUrl",
+    "ConversationEntryContentItem_Text",
+    "ConversationStart",
+    "CreateStreamResponse",
+    "DeforumSdPageOutput",
+    "DeforumSdPageRequest",
+    "DeforumSdPageRequestSelectedModel",
+    "DeforumSdPageResponse",
+    "DeforumSdPageStatusResponse",
+    "Detail",
+    "DocExtractPageOutput",
+    "DocExtractPageRequest",
+    "DocExtractPageRequestSelectedAsrModel",
+    "DocExtractPageRequestSelectedModel",
+    "DocExtractPageResponse",
+    "DocExtractPageStatusResponse",
+    "DocSearchPageOutput",
+    "DocSearchPageRequest",
+    "DocSearchPageRequestCitationStyle",
+    "DocSearchPageRequestEmbeddingModel",
+    "DocSearchPageRequestKeywordQuery",
+    "DocSearchPageRequestSelectedModel",
+    "DocSearchPageResponse",
+    "DocSearchPageStatusResponse",
+    "DocSummaryPageOutput",
+    "DocSummaryPageRequest",
+    "DocSummaryPageRequestSelectedAsrModel",
+    "DocSummaryPageRequestSelectedModel",
+    "DocSummaryPageResponse",
+    "DocSummaryPageStatusResponse",
+    "EmailFaceInpaintingPageOutput",
+    "EmailFaceInpaintingPageRequest",
+    "EmailFaceInpaintingPageRequestSelectedModel",
+    "EmailFaceInpaintingPageResponse",
+    "EmailFaceInpaintingPageStatusResponse",
+    "EmbeddingsPageOutput",
+    "EmbeddingsPageRequest",
+    "EmbeddingsPageRequestSelectedModel",
+    "EmbeddingsPageResponse",
+    "EmbeddingsPageStatusResponse",
+    "EvalPrompt",
+    "FaceInpaintingPageOutput",
+    "FaceInpaintingPageRequest",
+    "FaceInpaintingPageRequestSelectedModel",
+    "FaceInpaintingPageResponse",
+    "FaceInpaintingPageStatusResponse",
+    "FailedReponseModelV2",
+    "FailedResponseDetail",
+    "FinalResponse",
+    "Function",
+    "FunctionsPageOutput",
+    "FunctionsPageRequest",
+    "FunctionsPageResponse",
+    "FunctionsPageStatusResponse",
+    "GenericErrorResponse",
+    "GenericErrorResponseDetail",
+    "GoogleGptPageOutput",
+    "GoogleGptPageRequest",
+    "GoogleGptPageRequestEmbeddingModel",
+    "GoogleGptPageRequestSelectedModel",
+    "GoogleGptPageResponse",
+    "GoogleGptPageStatusResponse",
+    "GoogleImageGenPageOutput",
+    "GoogleImageGenPageRequest",
+    "GoogleImageGenPageRequestSelectedModel",
+    "GoogleImageGenPageResponse",
+    "GoogleImageGenPageStatusResponse",
+    "HttpValidationError",
+    "ImageSegmentationPageOutput",
+    "ImageSegmentationPageRequest",
+    "ImageSegmentationPageRequestSelectedModel",
+    "ImageSegmentationPageResponse",
+    "ImageSegmentationPageStatusResponse",
+    "ImageUrl",
+    "Img2ImgPageOutput",
+    "Img2ImgPageRequest",
+    "Img2ImgPageRequestSelectedControlnetModel",
+    "Img2ImgPageRequestSelectedControlnetModelItem",
+    "Img2ImgPageRequestSelectedModel",
+    "Img2ImgPageResponse",
+    "Img2ImgPageStatusResponse",
+    "LetterWriterPageOutput",
+    "LetterWriterPageRequest",
+    "LetterWriterPageResponse",
+    "LetterWriterPageStatusResponse",
+    "Level",
+    "LipsyncPageOutput",
+    "LipsyncPageRequest",
+    "LipsyncPageRequestSelectedModel",
+    "LipsyncPageResponse",
+    "LipsyncPageStatusResponse",
+    "LipsyncTtsPageOutput",
+    "LipsyncTtsPageRequest",
+    "LipsyncTtsPageRequestOpenaiTtsModel",
+    "LipsyncTtsPageRequestOpenaiVoiceName",
+    "LipsyncTtsPageRequestSelectedModel",
+    "LipsyncTtsPageRequestTtsProvider",
+    "LipsyncTtsPageResponse",
+    "LipsyncTtsPageStatusResponse",
+    "LlmTools",
+    "MessagePart",
+    "ObjectInpaintingPageOutput",
+    "ObjectInpaintingPageRequest",
+    "ObjectInpaintingPageRequestSelectedModel",
+    "ObjectInpaintingPageResponse",
+    "ObjectInpaintingPageStatusResponse",
+    "Preprocess",
+    "Prompt",
+    "PromptTreeNode",
+    "QrCodeGeneratorPageOutput",
+    "QrCodeGeneratorPageRequest",
+    "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
+    "QrCodeGeneratorPageRequestSelectedControlnetModelItem",
+    "QrCodeGeneratorPageRequestSelectedModel",
+    "QrCodeGeneratorPageResponse",
+    "QrCodeGeneratorPageStatusResponse",
+    "RecipeFunction",
+    "RecipeRunState",
+    "RelatedDocSearchResponse",
+    "RelatedGoogleGptResponse",
+    "RelatedQnADocPageOutput",
+    "RelatedQnADocPageRequest",
+    "RelatedQnADocPageRequestCitationStyle",
+    "RelatedQnADocPageRequestEmbeddingModel",
+    "RelatedQnADocPageRequestKeywordQuery",
+    "RelatedQnADocPageRequestSelectedModel",
+    "RelatedQnADocPageResponse",
+    "RelatedQnADocPageStatusResponse",
+    "RelatedQnAPageOutput",
+    "RelatedQnAPageRequest",
+    "RelatedQnAPageRequestEmbeddingModel",
+    "RelatedQnAPageRequestSelectedModel",
+    "RelatedQnAPageResponse",
+    "RelatedQnAPageStatusResponse",
+    "ReplyButton",
+    "ResponseModel",
+    "ResponseModelFinalKeywordQuery",
+    "ResponseModelFinalPrompt",
+    "Role",
+    "RunSettings",
+    "RunSettingsRetentionPolicy",
+    "RunStart",
+    "SadTalkerSettings",
+    "Scheduler",
+    "SearchReference",
+    "SeoSummaryPageOutput",
+    "SeoSummaryPageRequest",
+    "SeoSummaryPageRequestSelectedModel",
+    "SeoSummaryPageResponse",
+    "SeoSummaryPageStatusResponse",
+    "SerpSearchLocation",
+    "SerpSearchType",
+    "SmartGptPageOutput",
+    "SmartGptPageRequest",
+    "SmartGptPageRequestSelectedModel",
+    "SmartGptPageResponse",
+    "SmartGptPageStatusResponse",
+    "SocialLookupEmailPageOutput",
+    "SocialLookupEmailPageRequest",
+    "SocialLookupEmailPageRequestSelectedModel",
+    "SocialLookupEmailPageResponse",
+    "SocialLookupEmailPageStatusResponse",
+    "StreamError",
+    "Text2AudioPageOutput",
+    "Text2AudioPageRequest",
+    "Text2AudioPageResponse",
+    "Text2AudioPageStatusResponse",
+    "TextToSpeechPageOutput",
+    "TextToSpeechPageRequest",
+    "TextToSpeechPageRequestOpenaiTtsModel",
+    "TextToSpeechPageRequestOpenaiVoiceName",
+    "TextToSpeechPageRequestTtsProvider",
+    "TextToSpeechPageResponse",
+    "TextToSpeechPageStatusResponse",
+    "TrainingDataModel",
+    "TranslationPageOutput",
+    "TranslationPageRequest",
+    "TranslationPageRequestSelectedModel",
+    "TranslationPageResponse",
+    "TranslationPageStatusResponse",
+    "Trigger",
+    "ValidationError",
+    "ValidationErrorLocItem",
+    "Vcard",
+    "VideoBotsPageOutput",
+    "VideoBotsPageOutputFinalKeywordQuery",
+    "VideoBotsPageOutputFinalPrompt",
+    "VideoBotsPageRequest",
+    "VideoBotsPageRequestAsrModel",
+    "VideoBotsPageRequestCitationStyle",
+    "VideoBotsPageRequestEmbeddingModel",
+    "VideoBotsPageRequestLipsyncModel",
+    "VideoBotsPageRequestOpenaiTtsModel",
+    "VideoBotsPageRequestOpenaiVoiceName",
+    "VideoBotsPageRequestSelectedModel",
+    "VideoBotsPageRequestTranslationModel",
+    "VideoBotsPageRequestTtsProvider",
+    "VideoBotsPageResponse",
+    "VideoBotsPageStatusResponse",
+]
diff --git a/src/gooey/types/agg_function.py b/src/gooey/types/agg_function.py
new file mode 100644
index 0000000..c9963ba
--- /dev/null
+++ b/src/gooey/types/agg_function.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .function import Function
+
+
class AggFunction(pydantic_v1.BaseModel):
    """An aggregation function, optionally scoped to a single column."""

    column: typing.Optional[str] = None
    function: Function

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/agg_function_result.py b/src/gooey/types/agg_function_result.py
new file mode 100644
index 0000000..ca86831
--- /dev/null
+++ b/src/gooey/types/agg_function_result.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .function import Function
+
+
class AggFunctionResult(pydantic_v1.BaseModel):
    """The computed result of one aggregation function over one column."""

    column: str
    function: Function
    count: int
    value: float

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/animation_prompt.py b/src/gooey/types/animation_prompt.py
new file mode 100644
index 0000000..63ca7ce
--- /dev/null
+++ b/src/gooey/types/animation_prompt.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
class AnimationPrompt(pydantic_v1.BaseModel):
    """A text prompt paired with the animation frame it applies to."""

    frame: str
    prompt: str

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/asr_chunk.py b/src/gooey/types/asr_chunk.py
new file mode 100644
index 0000000..386eeda
--- /dev/null
+++ b/src/gooey/types/asr_chunk.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
class AsrChunk(pydantic_v1.BaseModel):
    """One segment of an ASR transcript with speaker and timing info."""

    timestamp: typing.List[typing.Any]
    text: str
    speaker: int

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/asr_output_json.py b/src/gooey/types/asr_output_json.py
new file mode 100644
index 0000000..45e2a46
--- /dev/null
+++ b/src/gooey/types/asr_output_json.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .asr_chunk import AsrChunk
+
+
class AsrOutputJson(pydantic_v1.BaseModel):
    """Structured ASR output: full text plus optional per-chunk detail."""

    text: str
    chunks: typing.Optional[typing.List[AsrChunk]] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/asr_page_output.py b/src/gooey/types/asr_page_output.py
new file mode 100644
index 0000000..04be9c8
--- /dev/null
+++ b/src/gooey/types/asr_page_output.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem
+from .called_function_response import CalledFunctionResponse
+
+
class AsrPageOutput(pydantic_v1.BaseModel):
    """Output of an ASR recipe run."""

    raw_output_text: typing.Optional[typing.List[str]] = None
    output_text: typing.List[AsrPageOutputOutputTextItem]
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/asr_page_output_output_text_item.py b/src/gooey/types/asr_page_output_output_text_item.py
new file mode 100644
index 0000000..c65822d
--- /dev/null
+++ b/src/gooey/types/asr_page_output_output_text_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .asr_output_json import AsrOutputJson
+
# One transcript entry: either plain text or a structured `AsrOutputJson` payload.
AsrPageOutputOutputTextItem = typing.Union[str, AsrOutputJson]
diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py
new file mode 100644
index 0000000..2ec3985
--- /dev/null
+++ b/src/gooey/types/asr_page_request.py
@@ -0,0 +1,58 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .asr_page_request_output_format import AsrPageRequestOutputFormat
+from .asr_page_request_selected_model import AsrPageRequestSelectedModel
+from .asr_page_request_translation_model import AsrPageRequestTranslationModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
class AsrPageRequest(pydantic_v1.BaseModel):
    """Request body for an ASR (speech recognition) recipe run."""

    functions: typing.Optional[typing.List[RecipeFunction]] = None
    # Variables to be used as Jinja prompt templates and in functions as arguments.
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    documents: typing.List[str]
    selected_model: typing.Optional[AsrPageRequestSelectedModel] = None
    language: typing.Optional[str] = None
    translation_model: typing.Optional[AsrPageRequestTranslationModel] = None
    output_format: typing.Optional[AsrPageRequestOutputFormat] = None
    # Deprecated: use `translation_model` & `translation_target` instead.
    google_translate_target: typing.Optional[str] = pydantic_v1.Field(default=None)
    translation_source: typing.Optional[str] = None
    translation_target: typing.Optional[str] = None
    # Glossary to customize translation and improve accuracy of domain-specific
    # terms; if not specified or invalid, no glossary is used. Expected format:
    # https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing
    glossary_document: typing.Optional[str] = pydantic_v1.Field(default=None)
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/asr_page_request_output_format.py b/src/gooey/types/asr_page_request_output_format.py
new file mode 100644
index 0000000..101e681
--- /dev/null
+++ b/src/gooey/types/asr_page_request_output_format.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Transcript output format. `typing.Any` keeps the union open, so values
# outside the listed literals are still accepted.
AsrPageRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any]
diff --git a/src/gooey/types/asr_page_request_selected_model.py b/src/gooey/types/asr_page_request_selected_model.py
new file mode 100644
index 0000000..5180332
--- /dev/null
+++ b/src/gooey/types/asr_page_request_selected_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Speech-recognition model selector. `typing.Any` keeps the union open, so
# model IDs outside the listed literals are still accepted.
AsrPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "whisper_large_v2",
        "whisper_large_v3",
        "whisper_hindi_large_v2",
        "whisper_telugu_large_v2",
        "nemo_english",
        "nemo_hindi",
        "vakyansh_bhojpuri",
        "gcp_v1",
        "usm",
        "deepgram",
        "azure",
        "seamless_m4t",
        "mms_1b_all",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/asr_page_request_translation_model.py b/src/gooey/types/asr_page_request_translation_model.py
new file mode 100644
index 0000000..d5dcef6
--- /dev/null
+++ b/src/gooey/types/asr_page_request_translation_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Translation backend selector. `typing.Any` keeps the union open, so values
# outside the listed literals are still accepted.
AsrPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/asr_page_response.py b/src/gooey/types/asr_page_response.py
new file mode 100644
index 0000000..7c82a99
--- /dev/null
+++ b/src/gooey/types/asr_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .asr_page_output import AsrPageOutput
+
+
class AsrPageResponse(pydantic_v1.BaseModel):
    """Synchronous response for an ASR recipe run."""

    # Unique ID for this run.
    id: str = pydantic_v1.Field()
    # Web URL for this run.
    url: str = pydantic_v1.Field()
    # Time when the run was created, as ISO format.
    created_at: str = pydantic_v1.Field()
    # Output of the run.
    output: AsrPageOutput = pydantic_v1.Field()

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/asr_page_status_response.py b/src/gooey/types/asr_page_status_response.py
new file mode 100644
index 0000000..124ffea
--- /dev/null
+++ b/src/gooey/types/asr_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .asr_page_output import AsrPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class AsrPageStatusResponse(pydantic_v1.BaseModel):
    """Polling/status response for an asynchronous ASR recipe run."""

    # Unique ID for this run.
    run_id: str = pydantic_v1.Field()
    # Web URL for this run.
    web_url: str = pydantic_v1.Field()
    # Time when the run was created, as ISO format.
    created_at: str = pydantic_v1.Field()
    # Total run time in seconds.
    run_time_sec: int = pydantic_v1.Field()
    # Status of the run.
    status: RecipeRunState = pydantic_v1.Field()
    # Details about the status of the run as a human readable string.
    detail: str = pydantic_v1.Field()
    # Output of the run. Only available if status is `"completed"`.
    output: typing.Optional[AsrPageOutput] = pydantic_v1.Field(default=None)

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/async_api_response_model_v3.py b/src/gooey/types/async_api_response_model_v3.py
new file mode 100644
index 0000000..30c925b
--- /dev/null
+++ b/src/gooey/types/async_api_response_model_v3.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
class AsyncApiResponseModelV3(pydantic_v1.BaseModel):
    """Acknowledgement returned when an async run is accepted."""

    # Unique ID for this run.
    run_id: str = pydantic_v1.Field()
    # Web URL for this run.
    web_url: str = pydantic_v1.Field()
    # Time when the run was created, as ISO format.
    created_at: str = pydantic_v1.Field()
    # URL to check the status of the run. Also included in the `Location`
    # header of the response.
    status_url: str = pydantic_v1.Field()

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/balance_response.py b/src/gooey/types/balance_response.py
new file mode 100644
index 0000000..b0eb30b
--- /dev/null
+++ b/src/gooey/types/balance_response.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
class BalanceResponse(pydantic_v1.BaseModel):
    """Account balance payload."""

    # Current balance in credits.
    balance: int = pydantic_v1.Field()

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/bot_broadcast_filters.py b/src/gooey/types/bot_broadcast_filters.py
new file mode 100644
index 0000000..e0636f5
--- /dev/null
+++ b/src/gooey/types/bot_broadcast_filters.py
@@ -0,0 +1,51 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
class BotBroadcastFilters(pydantic_v1.BaseModel):
    """Recipient filters for a bot broadcast (WhatsApp / Slack)."""

    # A list of WhatsApp phone numbers to broadcast to.
    wa_phone_number_in: typing.Optional[typing.List[str]] = pydantic_v1.Field(alias="wa_phone_number__in", default=None)
    # A list of Slack user IDs to broadcast to.
    slack_user_id_in: typing.Optional[typing.List[str]] = pydantic_v1.Field(alias="slack_user_id__in", default=None)
    # Filter by the Slack user's name. Case insensitive.
    slack_user_name_icontains: typing.Optional[str] = pydantic_v1.Field(
        alias="slack_user_name__icontains", default=None
    )
    # Filter by whether the Slack channel is personal. By default, broadcasts
    # to both public and personal Slack channels.
    slack_channel_is_personal: typing.Optional[bool] = pydantic_v1.Field(default=None)

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        # Both the pydantic-v1 and pydantic-v2 spellings of this option are
        # set, so fields can be populated by name as well as by alias.
        allow_population_by_field_name = True
        populate_by_name = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/bulk_eval_page_output.py b/src/gooey/types/bulk_eval_page_output.py
new file mode 100644
index 0000000..d57a946
--- /dev/null
+++ b/src/gooey/types/bulk_eval_page_output.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .agg_function_result import AggFunctionResult
+from .called_function_response import CalledFunctionResponse
+
+
class BulkEvalPageOutput(pydantic_v1.BaseModel):
    """Output of a Bulk Eval recipe run."""

    output_documents: typing.List[str]
    final_prompts: typing.Optional[typing.List[typing.List[str]]] = None
    aggregations: typing.Optional[typing.List[typing.List[AggFunctionResult]]] = None
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/bulk_eval_page_request.py b/src/gooey/types/bulk_eval_page_request.py
new file mode 100644
index 0000000..c72bfe8
--- /dev/null
+++ b/src/gooey/types/bulk_eval_page_request.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .agg_function import AggFunction
+from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel
+from .eval_prompt import EvalPrompt
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
class BulkEvalPageRequest(pydantic_v1.BaseModel):
    """Request body for a Bulk Eval recipe run."""

    functions: typing.Optional[typing.List[RecipeFunction]] = None
    # Variables to be used as Jinja prompt templates and in functions as arguments.
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = None
    avoid_repetition: typing.Optional[bool] = None
    num_outputs: typing.Optional[int] = None
    quality: typing.Optional[float] = None
    max_tokens: typing.Optional[int] = None
    sampling_temperature: typing.Optional[float] = None
    # Upload or link to a CSV or Google Sheet that contains the sample input
    # data (e.g. sample questions for Copilot, or image-description/URL pairs
    # for Art QR Code). Header names must be included.
    documents: typing.List[str] = pydantic_v1.Field()
    # Custom LLM prompts computing metrics for each row of the input data; the
    # output should be a JSON object mapping metric names to values. The
    # `columns` dictionary can be used to reference the spreadsheet columns.
    eval_prompts: typing.Optional[typing.List[EvalPrompt]] = pydantic_v1.Field(default=None)
    # Aggregate using one or more operations. Uses pandas:
    # https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats
    agg_functions: typing.Optional[typing.List[AggFunction]] = pydantic_v1.Field(default=None)
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/bulk_eval_page_request_selected_model.py b/src/gooey/types/bulk_eval_page_request_selected_model.py
new file mode 100644
index 0000000..6175087
--- /dev/null
+++ b/src/gooey/types/bulk_eval_page_request_selected_model.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# LLM selector for Bulk Eval. `typing.Any` keeps the union open, so model IDs
# outside the listed literals are still accepted.
BulkEvalPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "gpt_4_o",
        "gpt_4_turbo_vision",
        "gpt_4_vision",
        "gpt_4_turbo",
        "gpt_4",
        "gpt_4_32k",
        "gpt_3_5_turbo",
        "gpt_3_5_turbo_16k",
        "gpt_3_5_turbo_instruct",
        "llama3_70b",
        "llama3_8b",
        "llama2_70b_chat",
        "mixtral_8x7b_instruct_0_1",
        "gemma_7b_it",
        "gemini_1_5_pro",
        "gemini_1_pro_vision",
        "gemini_1_pro",
        "palm2_chat",
        "palm2_text",
        "claude_3_5_sonnet",
        "claude_3_opus",
        "claude_3_sonnet",
        "claude_3_haiku",
        "sea_lion_7b_instruct",
        "text_davinci_003",
        "text_davinci_002",
        "code_davinci_002",
        "text_curie_001",
        "text_babbage_001",
        "text_ada_001",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/bulk_eval_page_response.py b/src/gooey/types/bulk_eval_page_response.py
new file mode 100644
index 0000000..3887009
--- /dev/null
+++ b/src/gooey/types/bulk_eval_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .bulk_eval_page_output import BulkEvalPageOutput
+
+
class BulkEvalPageResponse(pydantic_v1.BaseModel):
    """Synchronous response for a Bulk Eval recipe run."""

    # Unique ID for this run.
    id: str = pydantic_v1.Field()
    # Web URL for this run.
    url: str = pydantic_v1.Field()
    # Time when the run was created, as ISO format.
    created_at: str = pydantic_v1.Field()
    # Output of the run.
    output: BulkEvalPageOutput = pydantic_v1.Field()

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/bulk_eval_page_status_response.py b/src/gooey/types/bulk_eval_page_status_response.py
new file mode 100644
index 0000000..1ce7fe4
--- /dev/null
+++ b/src/gooey/types/bulk_eval_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .bulk_eval_page_output import BulkEvalPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class BulkEvalPageStatusResponse(pydantic_v1.BaseModel):
    """Polling/status response for an asynchronous Bulk Eval run."""

    # Unique ID for this run.
    run_id: str = pydantic_v1.Field()
    # Web URL for this run.
    web_url: str = pydantic_v1.Field()
    # Time when the run was created, as ISO format.
    created_at: str = pydantic_v1.Field()
    # Total run time in seconds.
    run_time_sec: int = pydantic_v1.Field()
    # Status of the run.
    status: RecipeRunState = pydantic_v1.Field()
    # Details about the status of the run as a human readable string.
    detail: str = pydantic_v1.Field()
    # Output of the run. Only available if status is `"completed"`.
    output: typing.Optional[BulkEvalPageOutput] = pydantic_v1.Field(default=None)

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/bulk_runner_page_output.py b/src/gooey/types/bulk_runner_page_output.py
new file mode 100644
index 0000000..ef202d4
--- /dev/null
+++ b/src/gooey/types/bulk_runner_page_output.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
class BulkRunnerPageOutput(pydantic_v1.BaseModel):
    """Output of a Bulk Runner recipe run."""

    output_documents: typing.List[str]
    # List of URLs to the evaluation runs that you requested.
    eval_runs: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None)
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with field aliases, omitting fields the caller never set;
        # explicit kwargs take precedence over these defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Merge two serializations: one that drops unset fields and one that
        # drops None values. The deep union keeps explicitly-set None fields
        # while still hiding untouched defaults.
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        return deep_union_pydantic_dicts(
            super().dict(**without_unset), super().dict(**without_none)
        )

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/bulk_runner_page_request.py b/src/gooey/types/bulk_runner_page_request.py
new file mode 100644
index 0000000..b0a7aaf
--- /dev/null
+++ b/src/gooey/types/bulk_runner_page_request.py
@@ -0,0 +1,65 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class BulkRunnerPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ documents: typing.List[str] = pydantic_v1.Field()
+ """
+ Upload or link to a CSV or google sheet that contains your sample input data.
+    For example, for Copilot, this would be sample questions or, for Art QR Code, this would be pairs of image descriptions and URLs.
+    Remember to include header names in your CSV too.
+ """
+
+ run_urls: typing.List[str] = pydantic_v1.Field()
+ """
+ Provide one or more Gooey.AI workflow runs.
+ You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+ """
+
+ input_columns: typing.Dict[str, str] = pydantic_v1.Field()
+ """
+ For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+ """
+
+ output_columns: typing.Dict[str, str] = pydantic_v1.Field()
+ """
+ For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+ """
+
+ eval_urls: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None)
+ """
+ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/bulk_runner_page_response.py b/src/gooey/types/bulk_runner_page_response.py
new file mode 100644
index 0000000..c926eff
--- /dev/null
+++ b/src/gooey/types/bulk_runner_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .bulk_runner_page_output import BulkRunnerPageOutput
+
+
+class BulkRunnerPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ output: BulkRunnerPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/bulk_runner_page_status_response.py b/src/gooey/types/bulk_runner_page_status_response.py
new file mode 100644
index 0000000..03e2fcf
--- /dev/null
+++ b/src/gooey/types/bulk_runner_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .bulk_runner_page_output import BulkRunnerPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class BulkRunnerPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run as a human readable string
+ """
+
+ output: typing.Optional[BulkRunnerPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/button_pressed.py b/src/gooey/types/button_pressed.py
new file mode 100644
index 0000000..17be9f8
--- /dev/null
+++ b/src/gooey/types/button_pressed.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class ButtonPressed(pydantic_v1.BaseModel):
+ button_id: str = pydantic_v1.Field()
+ """
+ The ID of the button that was pressed by the user
+ """
+
+ context_msg_id: str = pydantic_v1.Field()
+ """
+ The message ID of the context message on which the button was pressed
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/called_function_response.py b/src/gooey/types/called_function_response.py
new file mode 100644
index 0000000..99d69e4
--- /dev/null
+++ b/src/gooey/types/called_function_response.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .trigger import Trigger
+
+
+class CalledFunctionResponse(pydantic_v1.BaseModel):
+ url: str
+ trigger: Trigger
+ return_value: typing.Optional[typing.Any] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/chat_completion_content_part_image_param.py b/src/gooey/types/chat_completion_content_part_image_param.py
new file mode 100644
index 0000000..e50ad85
--- /dev/null
+++ b/src/gooey/types/chat_completion_content_part_image_param.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .image_url import ImageUrl
+
+
+class ChatCompletionContentPartImageParam(pydantic_v1.BaseModel):
+ image_url: typing.Optional[ImageUrl] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/chat_completion_content_part_text_param.py b/src/gooey/types/chat_completion_content_part_text_param.py
new file mode 100644
index 0000000..3dc21b8
--- /dev/null
+++ b/src/gooey/types/chat_completion_content_part_text_param.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class ChatCompletionContentPartTextParam(pydantic_v1.BaseModel):
+ text: typing.Optional[str] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/chyron_plant_page_output.py b/src/gooey/types/chyron_plant_page_output.py
new file mode 100644
index 0000000..d084d18
--- /dev/null
+++ b/src/gooey/types/chyron_plant_page_output.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class ChyronPlantPageOutput(pydantic_v1.BaseModel):
+ midi_translation: str
+ chyron_output: str
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/chyron_plant_page_request.py b/src/gooey/types/chyron_plant_page_request.py
new file mode 100644
index 0000000..d6ac34e
--- /dev/null
+++ b/src/gooey/types/chyron_plant_page_request.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class ChyronPlantPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ midi_notes: str
+ midi_notes_prompt: typing.Optional[str] = None
+ chyron_prompt: typing.Optional[str] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/chyron_plant_page_response.py b/src/gooey/types/chyron_plant_page_response.py
new file mode 100644
index 0000000..01f6145
--- /dev/null
+++ b/src/gooey/types/chyron_plant_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .chyron_plant_page_output import ChyronPlantPageOutput
+
+
+class ChyronPlantPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ output: ChyronPlantPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/chyron_plant_page_status_response.py b/src/gooey/types/chyron_plant_page_status_response.py
new file mode 100644
index 0000000..fcff542
--- /dev/null
+++ b/src/gooey/types/chyron_plant_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .chyron_plant_page_output import ChyronPlantPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class ChyronPlantPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run as a human readable string
+ """
+
+ output: typing.Optional[ChyronPlantPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_llm_page_output.py b/src/gooey/types/compare_llm_page_output.py
new file mode 100644
index 0000000..d4bf9be
--- /dev/null
+++ b/src/gooey/types/compare_llm_page_output.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class CompareLlmPageOutput(pydantic_v1.BaseModel):
+ output_text: typing.Dict[str, typing.List[str]]
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_llm_page_request.py b/src/gooey/types/compare_llm_page_request.py
new file mode 100644
index 0000000..304a2ce
--- /dev/null
+++ b/src/gooey/types/compare_llm_page_request.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
+from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class CompareLlmPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_prompt: typing.Optional[str] = None
+ selected_models: typing.Optional[typing.List[CompareLlmPageRequestSelectedModelsItem]] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_llm_page_request_response_format_type.py b/src/gooey/types/compare_llm_page_request_response_format_type.py
new file mode 100644
index 0000000..a846068
--- /dev/null
+++ b/src/gooey/types/compare_llm_page_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CompareLlmPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/compare_llm_page_request_selected_models_item.py b/src/gooey/types/compare_llm_page_request_selected_models_item.py
new file mode 100644
index 0000000..14654d5
--- /dev/null
+++ b/src/gooey/types/compare_llm_page_request_selected_models_item.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CompareLlmPageRequestSelectedModelsItem = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama3_8b",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_7b_it",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/compare_llm_page_response.py b/src/gooey/types/compare_llm_page_response.py
new file mode 100644
index 0000000..dd8e5b4
--- /dev/null
+++ b/src/gooey/types/compare_llm_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .compare_llm_page_output import CompareLlmPageOutput
+
+
+class CompareLlmPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ output: CompareLlmPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_llm_page_status_response.py b/src/gooey/types/compare_llm_page_status_response.py
new file mode 100644
index 0000000..3dfb142
--- /dev/null
+++ b/src/gooey/types/compare_llm_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .compare_llm_page_output import CompareLlmPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class CompareLlmPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run as a human readable string
+ """
+
+ output: typing.Optional[CompareLlmPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_text2img_page_output.py b/src/gooey/types/compare_text2img_page_output.py
new file mode 100644
index 0000000..42c3cb0
--- /dev/null
+++ b/src/gooey/types/compare_text2img_page_output.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class CompareText2ImgPageOutput(pydantic_v1.BaseModel):
+ output_images: typing.Dict[str, typing.List[str]]
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_text2img_page_request.py b/src/gooey/types/compare_text2img_page_request.py
new file mode 100644
index 0000000..0811696
--- /dev/null
+++ b/src/gooey/types/compare_text2img_page_request.py
@@ -0,0 +1,56 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .scheduler import Scheduler
+
+
+class CompareText2ImgPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ text_prompt: str
+ negative_prompt: typing.Optional[str] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ dall_e3quality: typing.Optional[str] = pydantic_v1.Field(alias="dall_e_3_quality", default=None)
+ dall_e3style: typing.Optional[str] = pydantic_v1.Field(alias="dall_e_3_style", default=None)
+ guidance_scale: typing.Optional[float] = None
+ seed: typing.Optional[int] = None
+ sd2upscaling: typing.Optional[bool] = pydantic_v1.Field(alias="sd_2_upscaling", default=None)
+ selected_models: typing.Optional[typing.List[CompareText2ImgPageRequestSelectedModelsItem]] = None
+ scheduler: typing.Optional[Scheduler] = None
+ edit_instruction: typing.Optional[str] = None
+ image_guidance_scale: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_text2img_page_request_selected_models_item.py b/src/gooey/types/compare_text2img_page_request_selected_models_item.py
new file mode 100644
index 0000000..4154491
--- /dev/null
+++ b/src/gooey/types/compare_text2img_page_request_selected_models_item.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CompareText2ImgPageRequestSelectedModelsItem = typing.Union[  # known model IDs; typing.Any tolerates values added server-side
+ typing.Literal[
+ "dream_shaper",
+ "dreamlike_2",
+ "sd_2",
+ "sd_1_5",
+ "dall_e",
+ "dall_e_3",
+ "openjourney_2",
+ "openjourney",
+ "analog_diffusion",
+ "protogen_5_3",
+ "jack_qiao",
+ "rodent_diffusion_1_5",
+ "deepfloyd_if",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/compare_text2img_page_response.py b/src/gooey/types/compare_text2img_page_response.py
new file mode 100644
index 0000000..232ef5e
--- /dev/null
+++ b/src/gooey/types/compare_text2img_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .compare_text2img_page_output import CompareText2ImgPageOutput
+
+
+class CompareText2ImgPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created, in ISO format
+ """
+
+ output: CompareText2ImgPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow  # keep unrecognized fields returned by the API
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_text2img_page_status_response.py b/src/gooey/types/compare_text2img_page_status_response.py
new file mode 100644
index 0000000..b170016
--- /dev/null
+++ b/src/gooey/types/compare_text2img_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .compare_text2img_page_output import CompareText2ImgPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class CompareText2ImgPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created, in ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run, as a human-readable string
+ """
+
+ output: typing.Optional[CompareText2ImgPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow  # keep unrecognized fields returned by the API
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_upscaler_page_output.py b/src/gooey/types/compare_upscaler_page_output.py
new file mode 100644
index 0000000..b2527db
--- /dev/null
+++ b/src/gooey/types/compare_upscaler_page_output.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class CompareUpscalerPageOutput(pydantic_v1.BaseModel):
+ output_images: typing.Optional[typing.Dict[str, str]] = pydantic_v1.Field(default=None)
+ """
+ Output Images
+ """
+
+ output_videos: typing.Optional[typing.Dict[str, str]] = pydantic_v1.Field(default=None)
+ """
+ Output Videos
+ """
+
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None  # NOTE(review): presumably results of attached RecipeFunctions — confirm
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_upscaler_page_request.py b/src/gooey/types/compare_upscaler_page_request.py
new file mode 100644
index 0000000..3d2fb6b
--- /dev/null
+++ b/src/gooey/types/compare_upscaler_page_request.py
@@ -0,0 +1,55 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class CompareUpscalerPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Input Image
+ """
+
+ input_video: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Input Video
+ """
+
+ scale: int = pydantic_v1.Field()
+ """
+ The final upsampling scale of the image
+ """
+
+ selected_models: typing.Optional[typing.List[CompareUpscalerPageRequestSelectedModelsItem]] = None
+ selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None  # Literal: the only accepted background model value
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_upscaler_page_request_selected_models_item.py b/src/gooey/types/compare_upscaler_page_request_selected_models_item.py
new file mode 100644
index 0000000..eff4f6e
--- /dev/null
+++ b/src/gooey/types/compare_upscaler_page_request_selected_models_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CompareUpscalerPageRequestSelectedModelsItem = typing.Union[  # known upscaler IDs; typing.Any tolerates values added server-side
+ typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any
+]
diff --git a/src/gooey/types/compare_upscaler_page_response.py b/src/gooey/types/compare_upscaler_page_response.py
new file mode 100644
index 0000000..847f4ce
--- /dev/null
+++ b/src/gooey/types/compare_upscaler_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .compare_upscaler_page_output import CompareUpscalerPageOutput
+
+
+class CompareUpscalerPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created, in ISO format
+ """
+
+ output: CompareUpscalerPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow  # keep unrecognized fields returned by the API
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/compare_upscaler_page_status_response.py b/src/gooey/types/compare_upscaler_page_status_response.py
new file mode 100644
index 0000000..e0c7103
--- /dev/null
+++ b/src/gooey/types/compare_upscaler_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .compare_upscaler_page_output import CompareUpscalerPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class CompareUpscalerPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created, in ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run, as a human-readable string
+ """
+
+ output: typing.Optional[CompareUpscalerPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow  # keep unrecognized fields returned by the API
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/console_logs.py b/src/gooey/types/console_logs.py
new file mode 100644
index 0000000..234bc4e
--- /dev/null
+++ b/src/gooey/types/console_logs.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .level import Level
+
+
+class ConsoleLogs(pydantic_v1.BaseModel):
+ level: Level  # severity level of this log entry
+ message: str  # log message text
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/content.py b/src/gooey/types/content.py
new file mode 100644
index 0000000..1e48eb8
--- /dev/null
+++ b/src/gooey/types/content.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .conversation_entry_content_item import ConversationEntryContentItem
+
+Content = typing.Union[str, typing.List[ConversationEntryContentItem]]  # plain text, or a list of rich content items
diff --git a/src/gooey/types/conversation_entry.py b/src/gooey/types/conversation_entry.py
new file mode 100644
index 0000000..e990c17
--- /dev/null
+++ b/src/gooey/types/conversation_entry.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .content import Content
+from .role import Role
+
+
+class ConversationEntry(pydantic_v1.BaseModel):
+ role: Role  # who produced this entry
+ content: Content  # plain text or a list of rich content items (see Content alias)
+ display_name: typing.Optional[str] = None  # NOTE(review): presumably a display name for the speaker — confirm
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/conversation_entry_content_item.py b/src/gooey/types/conversation_entry_content_item.py
new file mode 100644
index 0000000..bc952a6
--- /dev/null
+++ b/src/gooey/types/conversation_entry_content_item.py
@@ -0,0 +1,59 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .image_url import ImageUrl
+
+
+class ConversationEntryContentItem_Text(pydantic_v1.BaseModel):
+ text: typing.Optional[str] = None
+ type: typing.Literal["text"] = "text"  # discriminant for the text variant of ConversationEntryContentItem
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
+
+
+class ConversationEntryContentItem_ImageUrl(pydantic_v1.BaseModel):
+ image_url: typing.Optional[ImageUrl] = None
+ type: typing.Literal["image_url"] = "image_url"  # discriminant for the image_url variant of ConversationEntryContentItem
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
+
+
+ConversationEntryContentItem = typing.Union[ConversationEntryContentItem_Text, ConversationEntryContentItem_ImageUrl]  # discriminated by the "type" field
diff --git a/src/gooey/types/conversation_start.py b/src/gooey/types/conversation_start.py
new file mode 100644
index 0000000..5b20d5e
--- /dev/null
+++ b/src/gooey/types/conversation_start.py
@@ -0,0 +1,57 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class ConversationStart(pydantic_v1.BaseModel):
+ conversation_id: str = pydantic_v1.Field()
+ """
+ The conversation ID you provided in the request, or a random ID if not provided
+ """
+
+ user_id: str = pydantic_v1.Field()
+ """
+ The user ID associated with this conversation
+ """
+
+ user_message_id: str = pydantic_v1.Field()
+ """
+ The user message ID you provided in the request, or a random ID if not provided.
+ """
+
+ bot_message_id: str = pydantic_v1.Field()
+ """
+ The bot message ID. Use this ID as the `context_msg_id` when sending a `button_pressed`.
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the conversation was created, in ISO format
+ """
+
+ type: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ The conversation was started. Save the IDs for future requests.
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/create_stream_response.py b/src/gooey/types/create_stream_response.py
new file mode 100644
index 0000000..20cb228
--- /dev/null
+++ b/src/gooey/types/create_stream_response.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class CreateStreamResponse(pydantic_v1.BaseModel):
+ stream_url: str = pydantic_v1.Field()
+ """
+ The URL to stream the conversation. Use Server-Sent Events (SSE) to stream the response.
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/deforum_sd_page_output.py b/src/gooey/types/deforum_sd_page_output.py
new file mode 100644
index 0000000..b08e422
--- /dev/null
+++ b/src/gooey/types/deforum_sd_page_output.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class DeforumSdPageOutput(pydantic_v1.BaseModel):
+ output_video: str  # NOTE(review): presumably a URL to the generated video — confirm
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None  # NOTE(review): presumably results of attached RecipeFunctions — confirm
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/deforum_sd_page_request.py b/src/gooey/types/deforum_sd_page_request.py
new file mode 100644
index 0000000..07bcb53
--- /dev/null
+++ b/src/gooey/types/deforum_sd_page_request.py
@@ -0,0 +1,53 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .animation_prompt import AnimationPrompt
+from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class DeforumSdPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ animation_prompts: typing.List[AnimationPrompt]
+ max_frames: typing.Optional[int] = None
+ selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = None
+ animation_mode: typing.Optional[str] = None
+ zoom: typing.Optional[str] = None
+ translation_x: typing.Optional[str] = None
+ translation_y: typing.Optional[str] = None
+ rotation3d_x: typing.Optional[str] = pydantic_v1.Field(alias="rotation_3d_x", default=None)  # wire name: rotation_3d_x
+ rotation3d_y: typing.Optional[str] = pydantic_v1.Field(alias="rotation_3d_y", default=None)  # wire name: rotation_3d_y
+ rotation3d_z: typing.Optional[str] = pydantic_v1.Field(alias="rotation_3d_z", default=None)  # wire name: rotation_3d_z
+ fps: typing.Optional[int] = None
+ seed: typing.Optional[int] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True  # pydantic v1 spelling of populate_by_name
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/deforum_sd_page_request_selected_model.py b/src/gooey/types/deforum_sd_page_request_selected_model.py
new file mode 100644
index 0000000..3af657a
--- /dev/null
+++ b/src/gooey/types/deforum_sd_page_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+DeforumSdPageRequestSelectedModel = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any]  # known model IDs; typing.Any tolerates values added server-side
diff --git a/src/gooey/types/deforum_sd_page_response.py b/src/gooey/types/deforum_sd_page_response.py
new file mode 100644
index 0000000..464c1e0
--- /dev/null
+++ b/src/gooey/types/deforum_sd_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .deforum_sd_page_output import DeforumSdPageOutput
+
+
+class DeforumSdPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created, in ISO format
+ """
+
+ output: DeforumSdPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow  # keep unrecognized fields returned by the API
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/deforum_sd_page_status_response.py b/src/gooey/types/deforum_sd_page_status_response.py
new file mode 100644
index 0000000..df3eef6
--- /dev/null
+++ b/src/gooey/types/deforum_sd_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .deforum_sd_page_output import DeforumSdPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class DeforumSdPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created, in ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run, as a human-readable string
+ """
+
+ output: typing.Optional[DeforumSdPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow  # keep unrecognized fields returned by the API
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/detail.py b/src/gooey/types/detail.py
new file mode 100644
index 0000000..2b6070d
--- /dev/null
+++ b/src/gooey/types/detail.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+Detail = typing.Union[typing.Literal["auto", "low", "high"], typing.Any]  # known detail levels; typing.Any tolerates values added server-side
diff --git a/src/gooey/types/doc_extract_page_output.py b/src/gooey/types/doc_extract_page_output.py
new file mode 100644
index 0000000..067484a
--- /dev/null
+++ b/src/gooey/types/doc_extract_page_output.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class DocExtractPageOutput(pydantic_v1.BaseModel):
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None  # NOTE(review): presumably results of attached RecipeFunctions — confirm
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(  # union of the exclude_unset and exclude_none serializations
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py
new file mode 100644
index 0000000..4b15828
--- /dev/null
+++ b/src/gooey/types/doc_extract_page_request.py
@@ -0,0 +1,56 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
+from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
class DocExtractPageRequest(pydantic_v1.BaseModel):
    """Request body for a Doc Extract run."""

    functions: typing.Optional[typing.List[RecipeFunction]] = None
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used as Jinja prompt templates and in functions as arguments
    """

    documents: typing.List[str]
    sheet_url: typing.Optional[str] = None
    selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = None
    google_translate_target: typing.Optional[str] = None
    glossary_document: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    Provide a glossary to customize translation and improve accuracy of domain-specific terms.
    If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
    """

    task_instructions: typing.Optional[str] = None
    selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = None
    avoid_repetition: typing.Optional[bool] = None
    num_outputs: typing.Optional[int] = None
    quality: typing.Optional[float] = None
    max_tokens: typing.Optional[int] = None
    sampling_temperature: typing.Optional[float] = None
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_extract_page_request_selected_asr_model.py b/src/gooey/types/doc_extract_page_request_selected_asr_model.py
new file mode 100644
index 0000000..1640e23
--- /dev/null
+++ b/src/gooey/types/doc_extract_page_request_selected_asr_model.py
@@ -0,0 +1,22 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# Identifiers of the selectable speech-recognition (ASR) models.  The trailing
# ``typing.Any`` keeps the union open so model names added by newer API
# versions still type-check without a client regeneration.
DocExtractPageRequestSelectedAsrModel = typing.Union[
    typing.Literal[
        "whisper_large_v2",
        "whisper_large_v3",
        "whisper_hindi_large_v2",
        "whisper_telugu_large_v2",
        "nemo_english",
        "nemo_hindi",
        "vakyansh_bhojpuri",
        "gcp_v1",
        "usm",
        "deepgram",
        "azure",
        "seamless_m4t",
        "mms_1b_all",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/doc_extract_page_request_selected_model.py b/src/gooey/types/doc_extract_page_request_selected_model.py
new file mode 100644
index 0000000..32fc17b
--- /dev/null
+++ b/src/gooey/types/doc_extract_page_request_selected_model.py
@@ -0,0 +1,39 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# Identifiers of the selectable language models.  The trailing ``typing.Any``
# keeps the union open so model names added by newer API versions still
# type-check without a client regeneration.
DocExtractPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "gpt_4_o",
        "gpt_4_turbo_vision",
        "gpt_4_vision",
        "gpt_4_turbo",
        "gpt_4",
        "gpt_4_32k",
        "gpt_3_5_turbo",
        "gpt_3_5_turbo_16k",
        "gpt_3_5_turbo_instruct",
        "llama3_70b",
        "llama3_8b",
        "llama2_70b_chat",
        "mixtral_8x7b_instruct_0_1",
        "gemma_7b_it",
        "gemini_1_5_pro",
        "gemini_1_pro_vision",
        "gemini_1_pro",
        "palm2_chat",
        "palm2_text",
        "claude_3_5_sonnet",
        "claude_3_opus",
        "claude_3_sonnet",
        "claude_3_haiku",
        "sea_lion_7b_instruct",
        "text_davinci_003",
        "text_davinci_002",
        "code_davinci_002",
        "text_curie_001",
        "text_babbage_001",
        "text_ada_001",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/doc_extract_page_response.py b/src/gooey/types/doc_extract_page_response.py
new file mode 100644
index 0000000..7ce1055
--- /dev/null
+++ b/src/gooey/types/doc_extract_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .doc_extract_page_output import DocExtractPageOutput
+
+
class DocExtractPageResponse(pydantic_v1.BaseModel):
    """Immediate (synchronous) response for a Doc Extract run."""

    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: DocExtractPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_extract_page_status_response.py b/src/gooey/types/doc_extract_page_status_response.py
new file mode 100644
index 0000000..4d24fa3
--- /dev/null
+++ b/src/gooey/types/doc_extract_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .doc_extract_page_output import DocExtractPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class DocExtractPageStatusResponse(pydantic_v1.BaseModel):
    """Polling/status response for a Doc Extract run."""

    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[DocExtractPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_search_page_output.py b/src/gooey/types/doc_search_page_output.py
new file mode 100644
index 0000000..c9bb1d0
--- /dev/null
+++ b/src/gooey/types/doc_search_page_output.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+from .search_reference import SearchReference
+
+
class DocSearchPageOutput(pydantic_v1.BaseModel):
    """Output section of a Doc Search run."""

    output_text: typing.List[str]
    references: typing.List[SearchReference]
    final_prompt: str
    final_search_query: typing.Optional[str] = None
    # Results of recipe functions invoked during the run, when any were configured.
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_search_page_request.py b/src/gooey/types/doc_search_page_request.py
new file mode 100644
index 0000000..8107ad2
--- /dev/null
+++ b/src/gooey/types/doc_search_page_request.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
+from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel
+from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
+from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
class DocSearchPageRequest(pydantic_v1.BaseModel):
    """Request body for a Doc Search run."""

    functions: typing.Optional[typing.List[RecipeFunction]] = None
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used as Jinja prompt templates and in functions as arguments
    """

    search_query: str
    keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = None
    documents: typing.Optional[typing.List[str]] = None
    max_references: typing.Optional[int] = None
    max_context_words: typing.Optional[int] = None
    scroll_jump: typing.Optional[int] = None
    doc_extract_url: typing.Optional[str] = None
    embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = None
    dense_weight: typing.Optional[float] = pydantic_v1.Field(default=None)
    """
    Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
    Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
    """

    task_instructions: typing.Optional[str] = None
    query_instructions: typing.Optional[str] = None
    selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = None
    avoid_repetition: typing.Optional[bool] = None
    num_outputs: typing.Optional[int] = None
    quality: typing.Optional[float] = None
    max_tokens: typing.Optional[int] = None
    sampling_temperature: typing.Optional[float] = None
    citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = None
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_search_page_request_citation_style.py b/src/gooey/types/doc_search_page_request_citation_style.py
new file mode 100644
index 0000000..b47b3be
--- /dev/null
+++ b/src/gooey/types/doc_search_page_request_citation_style.py
@@ -0,0 +1,25 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# Citation rendering styles for search references.  The trailing ``typing.Any``
# keeps the union open so styles added by newer API versions still type-check
# without a client regeneration.
DocSearchPageRequestCitationStyle = typing.Union[
    typing.Literal[
        "number",
        "title",
        "url",
        "symbol",
        "markdown",
        "html",
        "slack_mrkdwn",
        "plaintext",
        "number_markdown",
        "number_html",
        "number_slack_mrkdwn",
        "number_plaintext",
        "symbol_markdown",
        "symbol_html",
        "symbol_slack_mrkdwn",
        "symbol_plaintext",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/doc_search_page_request_embedding_model.py b/src/gooey/types/doc_search_page_request_embedding_model.py
new file mode 100644
index 0000000..fb35612
--- /dev/null
+++ b/src/gooey/types/doc_search_page_request_embedding_model.py
@@ -0,0 +1,18 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# Identifiers of the selectable text-embedding models.  The trailing
# ``typing.Any`` keeps the union open so model names added by newer API
# versions still type-check without a client regeneration.
DocSearchPageRequestEmbeddingModel = typing.Union[
    typing.Literal[
        "openai_3_large",
        "openai_3_small",
        "openai_ada_2",
        "e5_large_v2",
        "e5_base_v2",
        "multilingual_e5_base",
        "multilingual_e5_large",
        "gte_large",
        "gte_base",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/doc_search_page_request_keyword_query.py b/src/gooey/types/doc_search_page_request_keyword_query.py
new file mode 100644
index 0000000..8083b3d
--- /dev/null
+++ b/src/gooey/types/doc_search_page_request_keyword_query.py
@@ -0,0 +1,5 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# A keyword query may be supplied either as one string or as a list of strings.
DocSearchPageRequestKeywordQuery = typing.Union[str, typing.List[str]]
diff --git a/src/gooey/types/doc_search_page_request_selected_model.py b/src/gooey/types/doc_search_page_request_selected_model.py
new file mode 100644
index 0000000..0c88fb5
--- /dev/null
+++ b/src/gooey/types/doc_search_page_request_selected_model.py
@@ -0,0 +1,39 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# Identifiers of the selectable language models.  The trailing ``typing.Any``
# keeps the union open so model names added by newer API versions still
# type-check without a client regeneration.
DocSearchPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "gpt_4_o",
        "gpt_4_turbo_vision",
        "gpt_4_vision",
        "gpt_4_turbo",
        "gpt_4",
        "gpt_4_32k",
        "gpt_3_5_turbo",
        "gpt_3_5_turbo_16k",
        "gpt_3_5_turbo_instruct",
        "llama3_70b",
        "llama3_8b",
        "llama2_70b_chat",
        "mixtral_8x7b_instruct_0_1",
        "gemma_7b_it",
        "gemini_1_5_pro",
        "gemini_1_pro_vision",
        "gemini_1_pro",
        "palm2_chat",
        "palm2_text",
        "claude_3_5_sonnet",
        "claude_3_opus",
        "claude_3_sonnet",
        "claude_3_haiku",
        "sea_lion_7b_instruct",
        "text_davinci_003",
        "text_davinci_002",
        "code_davinci_002",
        "text_curie_001",
        "text_babbage_001",
        "text_ada_001",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/doc_search_page_response.py b/src/gooey/types/doc_search_page_response.py
new file mode 100644
index 0000000..a1b5706
--- /dev/null
+++ b/src/gooey/types/doc_search_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .doc_search_page_output import DocSearchPageOutput
+
+
class DocSearchPageResponse(pydantic_v1.BaseModel):
    """Immediate (synchronous) response for a Doc Search run."""

    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: DocSearchPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_search_page_status_response.py b/src/gooey/types/doc_search_page_status_response.py
new file mode 100644
index 0000000..f1afb89
--- /dev/null
+++ b/src/gooey/types/doc_search_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .doc_search_page_output import DocSearchPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class DocSearchPageStatusResponse(pydantic_v1.BaseModel):
    """Polling/status response for a Doc Search run."""

    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[DocSearchPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_summary_page_output.py b/src/gooey/types/doc_summary_page_output.py
new file mode 100644
index 0000000..ed94493
--- /dev/null
+++ b/src/gooey/types/doc_summary_page_output.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+from .prompt_tree_node import PromptTreeNode
+
+
class DocSummaryPageOutput(pydantic_v1.BaseModel):
    """Output section of a Doc Summary run."""

    output_text: typing.List[str]
    prompt_tree: typing.Optional[typing.List[PromptTreeNode]] = None
    final_prompt: str
    # Results of recipe functions invoked during the run, when any were configured.
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py
new file mode 100644
index 0000000..66a9fc9
--- /dev/null
+++ b/src/gooey/types/doc_summary_page_request.py
@@ -0,0 +1,51 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
+from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
class DocSummaryPageRequest(pydantic_v1.BaseModel):
    """Request body for a Doc Summary run."""

    functions: typing.Optional[typing.List[RecipeFunction]] = None
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used as Jinja prompt templates and in functions as arguments
    """

    documents: typing.List[str]
    task_instructions: typing.Optional[str] = None
    merge_instructions: typing.Optional[str] = None
    selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = None
    avoid_repetition: typing.Optional[bool] = None
    num_outputs: typing.Optional[int] = None
    quality: typing.Optional[float] = None
    max_tokens: typing.Optional[int] = None
    sampling_temperature: typing.Optional[float] = None
    chain_type: typing.Optional[typing.Literal["map_reduce"]] = None
    selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = None
    google_translate_target: typing.Optional[str] = None
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_summary_page_request_selected_asr_model.py b/src/gooey/types/doc_summary_page_request_selected_asr_model.py
new file mode 100644
index 0000000..d189aa1
--- /dev/null
+++ b/src/gooey/types/doc_summary_page_request_selected_asr_model.py
@@ -0,0 +1,22 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# Identifiers of the selectable speech-recognition (ASR) models.  The trailing
# ``typing.Any`` keeps the union open so model names added by newer API
# versions still type-check without a client regeneration.
DocSummaryPageRequestSelectedAsrModel = typing.Union[
    typing.Literal[
        "whisper_large_v2",
        "whisper_large_v3",
        "whisper_hindi_large_v2",
        "whisper_telugu_large_v2",
        "nemo_english",
        "nemo_hindi",
        "vakyansh_bhojpuri",
        "gcp_v1",
        "usm",
        "deepgram",
        "azure",
        "seamless_m4t",
        "mms_1b_all",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/doc_summary_page_request_selected_model.py b/src/gooey/types/doc_summary_page_request_selected_model.py
new file mode 100644
index 0000000..55e97da
--- /dev/null
+++ b/src/gooey/types/doc_summary_page_request_selected_model.py
@@ -0,0 +1,39 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# Identifiers of the selectable language models.  The trailing ``typing.Any``
# keeps the union open so model names added by newer API versions still
# type-check without a client regeneration.
DocSummaryPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "gpt_4_o",
        "gpt_4_turbo_vision",
        "gpt_4_vision",
        "gpt_4_turbo",
        "gpt_4",
        "gpt_4_32k",
        "gpt_3_5_turbo",
        "gpt_3_5_turbo_16k",
        "gpt_3_5_turbo_instruct",
        "llama3_70b",
        "llama3_8b",
        "llama2_70b_chat",
        "mixtral_8x7b_instruct_0_1",
        "gemma_7b_it",
        "gemini_1_5_pro",
        "gemini_1_pro_vision",
        "gemini_1_pro",
        "palm2_chat",
        "palm2_text",
        "claude_3_5_sonnet",
        "claude_3_opus",
        "claude_3_sonnet",
        "claude_3_haiku",
        "sea_lion_7b_instruct",
        "text_davinci_003",
        "text_davinci_002",
        "code_davinci_002",
        "text_curie_001",
        "text_babbage_001",
        "text_ada_001",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/doc_summary_page_response.py b/src/gooey/types/doc_summary_page_response.py
new file mode 100644
index 0000000..b2511b6
--- /dev/null
+++ b/src/gooey/types/doc_summary_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .doc_summary_page_output import DocSummaryPageOutput
+
+
class DocSummaryPageResponse(pydantic_v1.BaseModel):
    """Immediate (synchronous) response for a Doc Summary run."""

    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: DocSummaryPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/doc_summary_page_status_response.py b/src/gooey/types/doc_summary_page_status_response.py
new file mode 100644
index 0000000..6967032
--- /dev/null
+++ b/src/gooey/types/doc_summary_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .doc_summary_page_output import DocSummaryPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class DocSummaryPageStatusResponse(pydantic_v1.BaseModel):
    """Polling/status response for a Doc Summary run."""

    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[DocSummaryPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/email_face_inpainting_page_output.py b/src/gooey/types/email_face_inpainting_page_output.py
new file mode 100644
index 0000000..07dc19e
--- /dev/null
+++ b/src/gooey/types/email_face_inpainting_page_output.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
class EmailFaceInpaintingPageOutput(pydantic_v1.BaseModel):
    """Output section of an Email Face Inpainting run."""

    input_image: str
    resized_image: str
    face_mask: str
    diffusion_images: typing.List[str]
    output_images: typing.List[str]
    email_sent: typing.Optional[bool] = None
    # Results of recipe functions invoked during the run, when any were configured.
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON using field aliases, skipping unset fields by default."""
        serialization_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**serialization_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Render as a dict by deep-merging the exclude-unset and exclude-none views."""
        without_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        without_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
        base = super().dict(**without_unset)
        overlay = super().dict(**without_none)
        return deep_union_pydantic_dicts(base, overlay)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/email_face_inpainting_page_request.py b/src/gooey/types/email_face_inpainting_page_request.py
new file mode 100644
index 0000000..cbe1d23
--- /dev/null
+++ b/src/gooey/types/email_face_inpainting_page_request.py
@@ -0,0 +1,61 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
class EmailFaceInpaintingPageRequest(pydantic_v1.BaseModel):
    # Request payload for the Email Face Inpainting endpoint. Field names,
    # types, defaults and attribute docstrings mirror the API definition.
    functions: typing.Optional[typing.List[RecipeFunction]] = None
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used as Jinja prompt templates and in functions as arguments
    """

    email_address: typing.Optional[str] = None
    twitter_handle: typing.Optional[str] = None
    text_prompt: str
    face_scale: typing.Optional[float] = None
    face_pos_x: typing.Optional[float] = None
    face_pos_y: typing.Optional[float] = None
    selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = None
    negative_prompt: typing.Optional[str] = None
    num_outputs: typing.Optional[int] = None
    quality: typing.Optional[int] = None
    upscale_factor: typing.Optional[float] = None
    output_width: typing.Optional[int] = None
    output_height: typing.Optional[int] = None
    guidance_scale: typing.Optional[float] = None
    should_send_email: typing.Optional[bool] = None
    email_from: typing.Optional[str] = None
    email_cc: typing.Optional[str] = None
    email_bcc: typing.Optional[str] = None
    email_subject: typing.Optional[str] = None
    email_body: typing.Optional[str] = None
    email_body_enable_html: typing.Optional[bool] = None
    fallback_email_body: typing.Optional[str] = None
    seed: typing.Optional[int] = None
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/email_face_inpainting_page_request_selected_model.py b/src/gooey/types/email_face_inpainting_page_request_selected_model.py
new file mode 100644
index 0000000..822b5a6
--- /dev/null
+++ b/src/gooey/types/email_face_inpainting_page_request_selected_model.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Known model identifiers for Email Face Inpainting; the trailing typing.Any
# keeps the union open so values added server-side still validate.
EmailFaceInpaintingPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "sd_2",
        "runway_ml",
        "dall_e",
        "jack_qiao",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/email_face_inpainting_page_response.py b/src/gooey/types/email_face_inpainting_page_response.py
new file mode 100644
index 0000000..bf73ac9
--- /dev/null
+++ b/src/gooey/types/email_face_inpainting_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
+
+
class EmailFaceInpaintingPageResponse(pydantic_v1.BaseModel):
    # Synchronous-run envelope: identifiers plus the completed output.
    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: EmailFaceInpaintingPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/email_face_inpainting_page_status_response.py b/src/gooey/types/email_face_inpainting_page_status_response.py
new file mode 100644
index 0000000..439b672
--- /dev/null
+++ b/src/gooey/types/email_face_inpainting_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class EmailFaceInpaintingPageStatusResponse(pydantic_v1.BaseModel):
    # Polling envelope for an async run; `output` is populated only once the
    # run has completed (see the field docstring below).
    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[EmailFaceInpaintingPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/embeddings_page_output.py b/src/gooey/types/embeddings_page_output.py
new file mode 100644
index 0000000..f4ae139
--- /dev/null
+++ b/src/gooey/types/embeddings_page_output.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
class EmbeddingsPageOutput(pydantic_v1.BaseModel):
    # Output payload of an Embeddings run: one float vector per input text.
    embeddings: typing.List[typing.List[float]]
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/embeddings_page_request.py b/src/gooey/types/embeddings_page_request.py
new file mode 100644
index 0000000..961b82b
--- /dev/null
+++ b/src/gooey/types/embeddings_page_request.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
class EmbeddingsPageRequest(pydantic_v1.BaseModel):
    # Request payload for the Embeddings endpoint. Field names, types and
    # attribute docstrings mirror the API definition.
    functions: typing.Optional[typing.List[RecipeFunction]] = None
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used as Jinja prompt templates and in functions as arguments
    """

    texts: typing.List[str]
    selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = None
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/embeddings_page_request_selected_model.py b/src/gooey/types/embeddings_page_request_selected_model.py
new file mode 100644
index 0000000..a03ecc8
--- /dev/null
+++ b/src/gooey/types/embeddings_page_request_selected_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Known embedding-model identifiers; the trailing typing.Any keeps the union
# open so values added server-side still validate.
EmbeddingsPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "openai_3_large",
        "openai_3_small",
        "openai_ada_2",
        "e5_large_v2",
        "e5_base_v2",
        "multilingual_e5_base",
        "multilingual_e5_large",
        "gte_large",
        "gte_base",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/embeddings_page_response.py b/src/gooey/types/embeddings_page_response.py
new file mode 100644
index 0000000..108ad23
--- /dev/null
+++ b/src/gooey/types/embeddings_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .embeddings_page_output import EmbeddingsPageOutput
+
+
class EmbeddingsPageResponse(pydantic_v1.BaseModel):
    # Synchronous-run envelope: identifiers plus the completed output.
    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: EmbeddingsPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/embeddings_page_status_response.py b/src/gooey/types/embeddings_page_status_response.py
new file mode 100644
index 0000000..583ddc8
--- /dev/null
+++ b/src/gooey/types/embeddings_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .embeddings_page_output import EmbeddingsPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class EmbeddingsPageStatusResponse(pydantic_v1.BaseModel):
    # Polling envelope for an async run; `output` is populated only once the
    # run has completed (see the field docstring below).
    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[EmbeddingsPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/eval_prompt.py b/src/gooey/types/eval_prompt.py
new file mode 100644
index 0000000..92a92e0
--- /dev/null
+++ b/src/gooey/types/eval_prompt.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
class EvalPrompt(pydantic_v1.BaseModel):
    # A named evaluation prompt: a label plus its prompt text.
    name: str
    prompt: str

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/face_inpainting_page_output.py b/src/gooey/types/face_inpainting_page_output.py
new file mode 100644
index 0000000..eba4c21
--- /dev/null
+++ b/src/gooey/types/face_inpainting_page_output.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
class FaceInpaintingPageOutput(pydantic_v1.BaseModel):
    # Output payload of a Face Inpainting run.
    resized_image: str
    face_mask: str
    diffusion_images: typing.List[str]
    output_images: typing.List[str]
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/face_inpainting_page_request.py b/src/gooey/types/face_inpainting_page_request.py
new file mode 100644
index 0000000..a38943c
--- /dev/null
+++ b/src/gooey/types/face_inpainting_page_request.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
class FaceInpaintingPageRequest(pydantic_v1.BaseModel):
    # Request payload for the Face Inpainting endpoint. Field names, types,
    # defaults and attribute docstrings mirror the API definition.
    functions: typing.Optional[typing.List[RecipeFunction]] = None
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used as Jinja prompt templates and in functions as arguments
    """

    input_image: str
    text_prompt: str
    face_scale: typing.Optional[float] = None
    face_pos_x: typing.Optional[float] = None
    face_pos_y: typing.Optional[float] = None
    selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = None
    negative_prompt: typing.Optional[str] = None
    num_outputs: typing.Optional[int] = None
    quality: typing.Optional[int] = None
    upscale_factor: typing.Optional[float] = None
    output_width: typing.Optional[int] = None
    output_height: typing.Optional[int] = None
    guidance_scale: typing.Optional[float] = None
    seed: typing.Optional[int] = None
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/face_inpainting_page_request_selected_model.py b/src/gooey/types/face_inpainting_page_request_selected_model.py
new file mode 100644
index 0000000..9b8eab6
--- /dev/null
+++ b/src/gooey/types/face_inpainting_page_request_selected_model.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Known model identifiers for Face Inpainting; the trailing typing.Any keeps
# the union open so values added server-side still validate.
FaceInpaintingPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "sd_2",
        "runway_ml",
        "dall_e",
        "jack_qiao",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/face_inpainting_page_response.py b/src/gooey/types/face_inpainting_page_response.py
new file mode 100644
index 0000000..f87eac9
--- /dev/null
+++ b/src/gooey/types/face_inpainting_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .face_inpainting_page_output import FaceInpaintingPageOutput
+
+
class FaceInpaintingPageResponse(pydantic_v1.BaseModel):
    # Synchronous-run envelope: identifiers plus the completed output.
    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: FaceInpaintingPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/face_inpainting_page_status_response.py b/src/gooey/types/face_inpainting_page_status_response.py
new file mode 100644
index 0000000..8782e7a
--- /dev/null
+++ b/src/gooey/types/face_inpainting_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .face_inpainting_page_output import FaceInpaintingPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class FaceInpaintingPageStatusResponse(pydantic_v1.BaseModel):
    # Polling envelope for an async run; `output` is populated only once the
    # run has completed (see the field docstring below).
    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[FaceInpaintingPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/failed_reponse_model_v2.py b/src/gooey/types/failed_reponse_model_v2.py
new file mode 100644
index 0000000..9f9acbc
--- /dev/null
+++ b/src/gooey/types/failed_reponse_model_v2.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .failed_response_detail import FailedResponseDetail
+
+
class FailedReponseModelV2(pydantic_v1.BaseModel):
    # Error envelope (v2) wrapping the failure detail.
    # NOTE(review): "Reponse" is misspelled in the API definition; the class
    # name is part of the public interface and must stay as-is.
    detail: FailedResponseDetail

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/failed_response_detail.py b/src/gooey/types/failed_response_detail.py
new file mode 100644
index 0000000..8f6f836
--- /dev/null
+++ b/src/gooey/types/failed_response_detail.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
class FailedResponseDetail(pydantic_v1.BaseModel):
    # Details of a failed run; every field is optional.
    id: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    Unique ID for this run
    """

    url: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    Web URL for this run
    """

    created_at: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    Time when the run was created as ISO format
    """

    error: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    Error message if the run failed
    """

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/final_response.py b/src/gooey/types/final_response.py
new file mode 100644
index 0000000..bc2a625
--- /dev/null
+++ b/src/gooey/types/final_response.py
@@ -0,0 +1,69 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .response_model import ResponseModel
+
+
class FinalResponse(pydantic_v1.BaseModel):
    # Terminal message of a streamed run: status fields plus optional output.
    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[ResponseModel] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    type: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    The run has completed. Use the `status_url` to check the status of the run and fetch the complete output.
    """

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/function.py b/src/gooey/types/function.py
new file mode 100644
index 0000000..5841377
--- /dev/null
+++ b/src/gooey/types/function.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Aggregation-function identifiers; the trailing typing.Any keeps the union
# open so values added server-side still validate.
Function = typing.Union[
    typing.Literal[
        "mean",
        "median",
        "min",
        "max",
        "sum",
        "cumsum",
        "prod",
        "cumprod",
        "std",
        "var",
        "first",
        "last",
        "count",
        "cumcount",
        "nunique",
        "rank",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/functions_page_output.py b/src/gooey/types/functions_page_output.py
new file mode 100644
index 0000000..267d709
--- /dev/null
+++ b/src/gooey/types/functions_page_output.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+from .console_logs import ConsoleLogs
+
+
class FunctionsPageOutput(pydantic_v1.BaseModel):
    # Output payload of a Functions (JS execution) run.
    return_value: typing.Optional[typing.Any] = None
    error: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    JS Error from the code. If there are no errors, this will be null
    """

    logs: typing.Optional[typing.List[ConsoleLogs]] = pydantic_v1.Field(default=None)
    """
    Console logs from the code execution
    """

    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize with alias keys and without never-set fields by default.
        merged_kwargs: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**merged_kwargs)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Deep-merge exclude_unset and exclude_none views so explicit None
        # values survive while unset fields are omitted.
        without_unset = super().dict(**{"by_alias": True, "exclude_unset": True, **kwargs})
        without_none = super().dict(**{"by_alias": True, "exclude_none": True, **kwargs})
        return deep_union_pydantic_dicts(without_unset, without_none)

    class Config:
        frozen = True  # instances are immutable
        smart_union = True
        extra = pydantic_v1.Extra.allow  # tolerate fields from newer API versions
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/functions_page_request.py b/src/gooey/types/functions_page_request.py
new file mode 100644
index 0000000..f3077d0
--- /dev/null
+++ b/src/gooey/types/functions_page_request.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .run_settings import RunSettings
+
+
class FunctionsPageRequest(pydantic_v1.BaseModel):
    """Request body for running the Functions page (executes JS server-side)."""

    code: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    The JS code to be executed.
    """

    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used in the code
    """

    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/functions_page_response.py b/src/gooey/types/functions_page_response.py
new file mode 100644
index 0000000..9841a8c
--- /dev/null
+++ b/src/gooey/types/functions_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .functions_page_output import FunctionsPageOutput
+
+
class FunctionsPageResponse(pydantic_v1.BaseModel):
    """API envelope for a completed Functions page run."""

    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: FunctionsPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/functions_page_status_response.py b/src/gooey/types/functions_page_status_response.py
new file mode 100644
index 0000000..01d8944
--- /dev/null
+++ b/src/gooey/types/functions_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .functions_page_output import FunctionsPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class FunctionsPageStatusResponse(pydantic_v1.BaseModel):
    """Polling/status envelope for an in-flight or finished Functions run."""

    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[FunctionsPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/generic_error_response.py b/src/gooey/types/generic_error_response.py
new file mode 100644
index 0000000..8315fc1
--- /dev/null
+++ b/src/gooey/types/generic_error_response.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .generic_error_response_detail import GenericErrorResponseDetail
+
+
class GenericErrorResponse(pydantic_v1.BaseModel):
    """Generic error envelope returned by the API; wraps a detail object."""

    detail: GenericErrorResponseDetail

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/generic_error_response_detail.py b/src/gooey/types/generic_error_response_detail.py
new file mode 100644
index 0000000..489456c
--- /dev/null
+++ b/src/gooey/types/generic_error_response_detail.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
class GenericErrorResponseDetail(pydantic_v1.BaseModel):
    """Detail payload of a generic API error: a single error message string."""

    error: str

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/google_gpt_page_output.py b/src/gooey/types/google_gpt_page_output.py
new file mode 100644
index 0000000..02d35e6
--- /dev/null
+++ b/src/gooey/types/google_gpt_page_output.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+from .search_reference import SearchReference
+
+
class GoogleGptPageOutput(pydantic_v1.BaseModel):
    """Output of a Google GPT page run: generated text plus the SERP context
    and references it was grounded on."""

    output_text: typing.List[str]
    serp_results: typing.Dict[str, typing.Any]
    references: typing.List[SearchReference]
    final_prompt: str
    final_search_query: typing.Optional[str] = None
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/google_gpt_page_request.py b/src/gooey/types/google_gpt_page_request.py
new file mode 100644
index 0000000..1cabbbf
--- /dev/null
+++ b/src/gooey/types/google_gpt_page_request.py
@@ -0,0 +1,74 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel
+from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .serp_search_location import SerpSearchLocation
+from .serp_search_type import SerpSearchType
+
+
class GoogleGptPageRequest(pydantic_v1.BaseModel):
    """Request body for the Google GPT page: a web search whose results are
    fed to an LLM. Only `search_query` and `site_filter` are required."""

    functions: typing.Optional[typing.List[RecipeFunction]] = None
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used as Jinja prompt templates and in functions as arguments
    """

    serp_search_location: typing.Optional[SerpSearchLocation] = None
    scaleserp_locations: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None)
    """
    DEPRECATED: use `serp_search_location` instead
    """

    serp_search_type: typing.Optional[SerpSearchType] = None
    scaleserp_search_field: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    DEPRECATED: use `serp_search_type` instead
    """

    search_query: str
    site_filter: str
    task_instructions: typing.Optional[str] = None
    query_instructions: typing.Optional[str] = None
    selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = None
    avoid_repetition: typing.Optional[bool] = None
    num_outputs: typing.Optional[int] = None
    quality: typing.Optional[float] = None
    max_tokens: typing.Optional[int] = None
    sampling_temperature: typing.Optional[float] = None
    max_search_urls: typing.Optional[int] = None
    max_references: typing.Optional[int] = None
    max_context_words: typing.Optional[int] = None
    scroll_jump: typing.Optional[int] = None
    embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = None
    dense_weight: typing.Optional[float] = pydantic_v1.Field(default=None)
    """
    Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
    Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
    """

    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/google_gpt_page_request_embedding_model.py b/src/gooey/types/google_gpt_page_request_embedding_model.py
new file mode 100644
index 0000000..66f060f
--- /dev/null
+++ b/src/gooey/types/google_gpt_page_request_embedding_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Embedding model identifiers accepted by the Google GPT page request.
# The union with `typing.Any` means unknown values are passed through
# rather than rejected at validation time.
GoogleGptPageRequestEmbeddingModel = typing.Union[
    typing.Literal[
        "openai_3_large",
        "openai_3_small",
        "openai_ada_2",
        "e5_large_v2",
        "e5_base_v2",
        "multilingual_e5_base",
        "multilingual_e5_large",
        "gte_large",
        "gte_base",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/google_gpt_page_request_selected_model.py b/src/gooey/types/google_gpt_page_request_selected_model.py
new file mode 100644
index 0000000..8d72870
--- /dev/null
+++ b/src/gooey/types/google_gpt_page_request_selected_model.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# LLM identifiers accepted by the Google GPT page request. The union with
# `typing.Any` means unknown values are passed through rather than rejected
# at validation time.
GoogleGptPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "gpt_4_o",
        "gpt_4_turbo_vision",
        "gpt_4_vision",
        "gpt_4_turbo",
        "gpt_4",
        "gpt_4_32k",
        "gpt_3_5_turbo",
        "gpt_3_5_turbo_16k",
        "gpt_3_5_turbo_instruct",
        "llama3_70b",
        "llama3_8b",
        "llama2_70b_chat",
        "mixtral_8x7b_instruct_0_1",
        "gemma_7b_it",
        "gemini_1_5_pro",
        "gemini_1_pro_vision",
        "gemini_1_pro",
        "palm2_chat",
        "palm2_text",
        "claude_3_5_sonnet",
        "claude_3_opus",
        "claude_3_sonnet",
        "claude_3_haiku",
        "sea_lion_7b_instruct",
        "text_davinci_003",
        "text_davinci_002",
        "code_davinci_002",
        "text_curie_001",
        "text_babbage_001",
        "text_ada_001",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/google_gpt_page_response.py b/src/gooey/types/google_gpt_page_response.py
new file mode 100644
index 0000000..d7b90e1
--- /dev/null
+++ b/src/gooey/types/google_gpt_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .google_gpt_page_output import GoogleGptPageOutput
+
+
class GoogleGptPageResponse(pydantic_v1.BaseModel):
    """API envelope for a completed Google GPT page run."""

    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: GoogleGptPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/google_gpt_page_status_response.py b/src/gooey/types/google_gpt_page_status_response.py
new file mode 100644
index 0000000..3454640
--- /dev/null
+++ b/src/gooey/types/google_gpt_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .google_gpt_page_output import GoogleGptPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class GoogleGptPageStatusResponse(pydantic_v1.BaseModel):
    """Polling/status envelope for an in-flight or finished Google GPT run."""

    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[GoogleGptPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/google_image_gen_page_output.py b/src/gooey/types/google_image_gen_page_output.py
new file mode 100644
index 0000000..c9cdce1
--- /dev/null
+++ b/src/gooey/types/google_image_gen_page_output.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
class GoogleImageGenPageOutput(pydantic_v1.BaseModel):
    """Output of a Google Image Gen page run: generated images plus the
    source image URLs found by the search."""

    output_images: typing.List[str]
    image_urls: typing.List[str]
    selected_image: typing.Optional[str] = None
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/google_image_gen_page_request.py b/src/gooey/types/google_image_gen_page_request.py
new file mode 100644
index 0000000..d0f8c80
--- /dev/null
+++ b/src/gooey/types/google_image_gen_page_request.py
@@ -0,0 +1,58 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .serp_search_location import SerpSearchLocation
+
+
class GoogleImageGenPageRequest(pydantic_v1.BaseModel):
    """Request body for the Google Image Gen page. Only `search_query` and
    `text_prompt` are required; the rest tune the image generation."""

    functions: typing.Optional[typing.List[RecipeFunction]] = None
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used as Jinja prompt templates and in functions as arguments
    """

    serp_search_location: typing.Optional[SerpSearchLocation] = None
    scaleserp_locations: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None)
    """
    DEPRECATED: use `serp_search_location` instead
    """

    search_query: str
    text_prompt: str
    selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = None
    negative_prompt: typing.Optional[str] = None
    num_outputs: typing.Optional[int] = None
    quality: typing.Optional[int] = None
    guidance_scale: typing.Optional[float] = None
    prompt_strength: typing.Optional[float] = None
    # Wire name is "sd_2_upscaling"; population by either name is enabled below.
    sd2upscaling: typing.Optional[bool] = pydantic_v1.Field(alias="sd_2_upscaling", default=None)
    seed: typing.Optional[int] = None
    image_guidance_scale: typing.Optional[float] = None
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        # Both spellings kept for pydantic v1 / v2 compatibility.
        allow_population_by_field_name = True
        populate_by_name = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/google_image_gen_page_request_selected_model.py b/src/gooey/types/google_image_gen_page_request_selected_model.py
new file mode 100644
index 0000000..c872962
--- /dev/null
+++ b/src/gooey/types/google_image_gen_page_request_selected_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Image model identifiers accepted by the Google Image Gen page request.
# The union with `typing.Any` means unknown values are passed through
# rather than rejected at validation time.
GoogleImageGenPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "dream_shaper",
        "dreamlike_2",
        "sd_2",
        "sd_1_5",
        "dall_e",
        "instruct_pix2pix",
        "openjourney_2",
        "openjourney",
        "analog_diffusion",
        "protogen_5_3",
        "jack_qiao",
        "rodent_diffusion_1_5",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/google_image_gen_page_response.py b/src/gooey/types/google_image_gen_page_response.py
new file mode 100644
index 0000000..70fc953
--- /dev/null
+++ b/src/gooey/types/google_image_gen_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .google_image_gen_page_output import GoogleImageGenPageOutput
+
+
class GoogleImageGenPageResponse(pydantic_v1.BaseModel):
    """API envelope for a completed Google Image Gen page run."""

    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: GoogleImageGenPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/google_image_gen_page_status_response.py b/src/gooey/types/google_image_gen_page_status_response.py
new file mode 100644
index 0000000..4e9cf09
--- /dev/null
+++ b/src/gooey/types/google_image_gen_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .google_image_gen_page_output import GoogleImageGenPageOutput
+from .recipe_run_state import RecipeRunState
+
+
class GoogleImageGenPageStatusResponse(pydantic_v1.BaseModel):
    """Polling/status envelope for an in-flight or finished Image Gen run."""

    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[GoogleImageGenPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/http_validation_error.py b/src/gooey/types/http_validation_error.py
new file mode 100644
index 0000000..30f8b24
--- /dev/null
+++ b/src/gooey/types/http_validation_error.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .validation_error import ValidationError
+
+
class HttpValidationError(pydantic_v1.BaseModel):
    """HTTP 422 response body: a list of field-level validation errors."""

    detail: typing.Optional[typing.List[ValidationError]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/image_segmentation_page_output.py b/src/gooey/types/image_segmentation_page_output.py
new file mode 100644
index 0000000..8698dcc
--- /dev/null
+++ b/src/gooey/types/image_segmentation_page_output.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
class ImageSegmentationPageOutput(pydantic_v1.BaseModel):
    """Output of an Image Segmentation page run: the segmented image, the
    cutout, and the resized image/mask pair."""

    output_image: str
    cutout_image: str
    resized_image: str
    resized_mask: str
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/image_segmentation_page_request.py b/src/gooey/types/image_segmentation_page_request.py
new file mode 100644
index 0000000..01a9158
--- /dev/null
+++ b/src/gooey/types/image_segmentation_page_request.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
class ImageSegmentationPageRequest(pydantic_v1.BaseModel):
    """Request body for the Image Segmentation page. Only `input_image` is
    required; the remaining fields tune masking and compositing."""

    functions: typing.Optional[typing.List[RecipeFunction]] = None
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used as Jinja prompt templates and in functions as arguments
    """

    input_image: str
    selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = None
    mask_threshold: typing.Optional[float] = None
    rect_persepective_transform: typing.Optional[bool] = None
    reflection_opacity: typing.Optional[float] = None
    obj_scale: typing.Optional[float] = None
    obj_pos_x: typing.Optional[float] = None
    obj_pos_y: typing.Optional[float] = None
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset."""
        opts: typing.Any = dict(by_alias=True, exclude_unset=True)
        opts.update(kwargs)
        return super().json(**opts)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict by deep-merging the exclude-unset and
        exclude-none views, so fields explicitly set to None survive."""
        shared: typing.Any = {"by_alias": True, **kwargs}
        skip_unset = super().dict(**{"exclude_unset": True, **shared})
        skip_none = super().dict(**{"exclude_none": True, **shared})
        return deep_union_pydantic_dicts(skip_unset, skip_none)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/image_segmentation_page_request_selected_model.py b/src/gooey/types/image_segmentation_page_request_selected_model.py
new file mode 100644
index 0000000..9b4b8d7
--- /dev/null
+++ b/src/gooey/types/image_segmentation_page_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+# Known segmentation model identifiers; the trailing typing.Any lets unknown
+# (future) server values validate instead of failing.
+ImageSegmentationPageRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any]
diff --git a/src/gooey/types/image_segmentation_page_response.py b/src/gooey/types/image_segmentation_page_response.py
new file mode 100644
index 0000000..2b39c04
--- /dev/null
+++ b/src/gooey/types/image_segmentation_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .image_segmentation_page_output import ImageSegmentationPageOutput
+
+
+class ImageSegmentationPageResponse(pydantic_v1.BaseModel):
+    """Completed-run response for the Image Segmentation recipe (auto-generated model)."""
+
+    id: str = pydantic_v1.Field()
+    """
+    Unique ID for this run
+    """
+
+    url: str = pydantic_v1.Field()
+    """
+    Web URL for this run
+    """
+
+    created_at: str = pydantic_v1.Field()
+    """
+    Time when the run was created as ISO format
+    """
+
+    output: ImageSegmentationPageOutput = pydantic_v1.Field()
+    """
+    Output of the run
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/image_segmentation_page_status_response.py b/src/gooey/types/image_segmentation_page_status_response.py
new file mode 100644
index 0000000..a075d55
--- /dev/null
+++ b/src/gooey/types/image_segmentation_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .image_segmentation_page_output import ImageSegmentationPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class ImageSegmentationPageStatusResponse(pydantic_v1.BaseModel):
+    """Status-poll response for an Image Segmentation run; output is present only once completed."""
+
+    run_id: str = pydantic_v1.Field()
+    """
+    Unique ID for this run
+    """
+
+    web_url: str = pydantic_v1.Field()
+    """
+    Web URL for this run
+    """
+
+    created_at: str = pydantic_v1.Field()
+    """
+    Time when the run was created as ISO format
+    """
+
+    run_time_sec: int = pydantic_v1.Field()
+    """
+    Total run time in seconds
+    """
+
+    status: RecipeRunState = pydantic_v1.Field()
+    """
+    Status of the run
+    """
+
+    detail: str = pydantic_v1.Field()
+    """
+    Details about the status of the run as a human readable string
+    """
+
+    output: typing.Optional[ImageSegmentationPageOutput] = pydantic_v1.Field(default=None)
+    """
+    Output of the run. Only available if status is `"completed"`
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/image_url.py b/src/gooey/types/image_url.py
new file mode 100644
index 0000000..c1dd11e
--- /dev/null
+++ b/src/gooey/types/image_url.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .detail import Detail
+
+
+class ImageUrl(pydantic_v1.BaseModel):
+    """An image reference: URL plus an optional detail level (auto-generated model)."""
+
+    url: typing.Optional[str] = None
+    detail: typing.Optional[Detail] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/img2img_page_output.py b/src/gooey/types/img2img_page_output.py
new file mode 100644
index 0000000..92fdef4
--- /dev/null
+++ b/src/gooey/types/img2img_page_output.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class Img2ImgPageOutput(pydantic_v1.BaseModel):
+    """Output of an Img2Img recipe run: generated image URLs plus any function-call records."""
+
+    output_images: typing.List[str]
+    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/img2img_page_request.py b/src/gooey/types/img2img_page_request.py
new file mode 100644
index 0000000..b94a98f
--- /dev/null
+++ b/src/gooey/types/img2img_page_request.py
@@ -0,0 +1,53 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
+from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class Img2ImgPageRequest(pydantic_v1.BaseModel):
+    """Request body for an Img2Img recipe run (auto-generated model)."""
+
+    functions: typing.Optional[typing.List[RecipeFunction]] = None
+    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    Variables to be used as Jinja prompt templates and in functions as arguments
+    """
+
+    input_image: str
+    text_prompt: typing.Optional[str] = None
+    selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = None
+    selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = None
+    negative_prompt: typing.Optional[str] = None
+    num_outputs: typing.Optional[int] = None
+    quality: typing.Optional[int] = None
+    output_width: typing.Optional[int] = None
+    output_height: typing.Optional[int] = None
+    guidance_scale: typing.Optional[float] = None
+    prompt_strength: typing.Optional[float] = None
+    controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
+    seed: typing.Optional[int] = None
+    image_guidance_scale: typing.Optional[float] = None
+    settings: typing.Optional[RunSettings] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/img2img_page_request_selected_controlnet_model.py b/src/gooey/types/img2img_page_request_selected_controlnet_model.py
new file mode 100644
index 0000000..dc17cc4
--- /dev/null
+++ b/src/gooey/types/img2img_page_request_selected_controlnet_model.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem
+
+# Either a list of controlnet model identifiers, or a single identifier given
+# as a bare literal string.
+Img2ImgPageRequestSelectedControlnetModel = typing.Union[
+    typing.List[Img2ImgPageRequestSelectedControlnetModelItem],
+    typing.Literal["sd_controlnet_canny"],
+    typing.Literal["sd_controlnet_depth"],
+    typing.Literal["sd_controlnet_hed"],
+    typing.Literal["sd_controlnet_mlsd"],
+    typing.Literal["sd_controlnet_normal"],
+    typing.Literal["sd_controlnet_openpose"],
+    typing.Literal["sd_controlnet_scribble"],
+    typing.Literal["sd_controlnet_seg"],
+    typing.Literal["sd_controlnet_tile"],
+    typing.Literal["sd_controlnet_brightness"],
+    typing.Literal["control_v1p_sd15_qrcode_monster_v2"],
+]
diff --git a/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py b/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py
new file mode 100644
index 0000000..1569cf5
--- /dev/null
+++ b/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+# Known controlnet model identifiers; the trailing typing.Any lets unknown
+# (future) server values validate instead of failing.
+Img2ImgPageRequestSelectedControlnetModelItem = typing.Union[
+    typing.Literal[
+        "sd_controlnet_canny",
+        "sd_controlnet_depth",
+        "sd_controlnet_hed",
+        "sd_controlnet_mlsd",
+        "sd_controlnet_normal",
+        "sd_controlnet_openpose",
+        "sd_controlnet_scribble",
+        "sd_controlnet_seg",
+        "sd_controlnet_tile",
+        "sd_controlnet_brightness",
+        "control_v1p_sd15_qrcode_monster_v2",
+    ],
+    typing.Any,
+]
diff --git a/src/gooey/types/img2img_page_request_selected_model.py b/src/gooey/types/img2img_page_request_selected_model.py
new file mode 100644
index 0000000..506c2b1
--- /dev/null
+++ b/src/gooey/types/img2img_page_request_selected_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+# Known Img2Img model identifiers; the trailing typing.Any lets unknown
+# (future) server values validate instead of failing.
+Img2ImgPageRequestSelectedModel = typing.Union[
+    typing.Literal[
+        "dream_shaper",
+        "dreamlike_2",
+        "sd_2",
+        "sd_1_5",
+        "dall_e",
+        "instruct_pix2pix",
+        "openjourney_2",
+        "openjourney",
+        "analog_diffusion",
+        "protogen_5_3",
+        "jack_qiao",
+        "rodent_diffusion_1_5",
+    ],
+    typing.Any,
+]
diff --git a/src/gooey/types/img2img_page_response.py b/src/gooey/types/img2img_page_response.py
new file mode 100644
index 0000000..406cc8e
--- /dev/null
+++ b/src/gooey/types/img2img_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .img2img_page_output import Img2ImgPageOutput
+
+
+class Img2ImgPageResponse(pydantic_v1.BaseModel):
+    """Completed-run response for the Img2Img recipe (auto-generated model)."""
+
+    id: str = pydantic_v1.Field()
+    """
+    Unique ID for this run
+    """
+
+    url: str = pydantic_v1.Field()
+    """
+    Web URL for this run
+    """
+
+    created_at: str = pydantic_v1.Field()
+    """
+    Time when the run was created as ISO format
+    """
+
+    output: Img2ImgPageOutput = pydantic_v1.Field()
+    """
+    Output of the run
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/img2img_page_status_response.py b/src/gooey/types/img2img_page_status_response.py
new file mode 100644
index 0000000..38b3a6c
--- /dev/null
+++ b/src/gooey/types/img2img_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .img2img_page_output import Img2ImgPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class Img2ImgPageStatusResponse(pydantic_v1.BaseModel):
+    """Status-poll response for an Img2Img run; output is present only once completed."""
+
+    run_id: str = pydantic_v1.Field()
+    """
+    Unique ID for this run
+    """
+
+    web_url: str = pydantic_v1.Field()
+    """
+    Web URL for this run
+    """
+
+    created_at: str = pydantic_v1.Field()
+    """
+    Time when the run was created as ISO format
+    """
+
+    run_time_sec: int = pydantic_v1.Field()
+    """
+    Total run time in seconds
+    """
+
+    status: RecipeRunState = pydantic_v1.Field()
+    """
+    Status of the run
+    """
+
+    detail: str = pydantic_v1.Field()
+    """
+    Details about the status of the run as a human readable string
+    """
+
+    output: typing.Optional[Img2ImgPageOutput] = pydantic_v1.Field(default=None)
+    """
+    Output of the run. Only available if status is `"completed"`
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/letter_writer_page_output.py b/src/gooey/types/letter_writer_page_output.py
new file mode 100644
index 0000000..1c2236f
--- /dev/null
+++ b/src/gooey/types/letter_writer_page_output.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class LetterWriterPageOutput(pydantic_v1.BaseModel):
+    """Output of a Letter Writer recipe run: generated letters plus the prompts used."""
+
+    output_letters: typing.List[str]
+    response_json: typing.Optional[typing.Any] = None
+    generated_input_prompt: str
+    final_prompt: str
+    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/letter_writer_page_request.py b/src/gooey/types/letter_writer_page_request.py
new file mode 100644
index 0000000..5431b62
--- /dev/null
+++ b/src/gooey/types/letter_writer_page_request.py
@@ -0,0 +1,54 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .training_data_model import TrainingDataModel
+
+
+class LetterWriterPageRequest(pydantic_v1.BaseModel):
+    """Request body for a Letter Writer recipe run (auto-generated model)."""
+
+    functions: typing.Optional[typing.List[RecipeFunction]] = None
+    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    Variables to be used as Jinja prompt templates and in functions as arguments
+    """
+
+    action_id: str
+    prompt_header: typing.Optional[str] = None
+    example_letters: typing.Optional[typing.List[TrainingDataModel]] = None
+    lm_selected_api: typing.Optional[str] = None
+    lm_selected_engine: typing.Optional[str] = None
+    num_outputs: typing.Optional[int] = None
+    quality: typing.Optional[float] = None
+    lm_sampling_temperature: typing.Optional[float] = None
+    api_http_method: typing.Optional[str] = None
+    api_url: typing.Optional[str] = None
+    api_headers: typing.Optional[str] = None
+    api_json_body: typing.Optional[str] = None
+    input_prompt: typing.Optional[str] = None
+    # Wire name is "strip_html_2_text"; the alias maps it to this Python attribute.
+    strip_html2text: typing.Optional[bool] = pydantic_v1.Field(alias="strip_html_2_text", default=None)
+    settings: typing.Optional[RunSettings] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+        # NOTE(review): populate_by_name is the pydantic-v2 name for the flag above;
+        # under pydantic v1 it is inert — presumably kept for v2 forward-compatibility.
+        populate_by_name = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/letter_writer_page_response.py b/src/gooey/types/letter_writer_page_response.py
new file mode 100644
index 0000000..d3056fd
--- /dev/null
+++ b/src/gooey/types/letter_writer_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .letter_writer_page_output import LetterWriterPageOutput
+
+
+class LetterWriterPageResponse(pydantic_v1.BaseModel):
+    """Completed-run response for the Letter Writer recipe (auto-generated model)."""
+
+    id: str = pydantic_v1.Field()
+    """
+    Unique ID for this run
+    """
+
+    url: str = pydantic_v1.Field()
+    """
+    Web URL for this run
+    """
+
+    created_at: str = pydantic_v1.Field()
+    """
+    Time when the run was created as ISO format
+    """
+
+    output: LetterWriterPageOutput = pydantic_v1.Field()
+    """
+    Output of the run
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/letter_writer_page_status_response.py b/src/gooey/types/letter_writer_page_status_response.py
new file mode 100644
index 0000000..8565736
--- /dev/null
+++ b/src/gooey/types/letter_writer_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .letter_writer_page_output import LetterWriterPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class LetterWriterPageStatusResponse(pydantic_v1.BaseModel):
+    """Status-poll response for a Letter Writer run; output is present only once completed."""
+
+    run_id: str = pydantic_v1.Field()
+    """
+    Unique ID for this run
+    """
+
+    web_url: str = pydantic_v1.Field()
+    """
+    Web URL for this run
+    """
+
+    created_at: str = pydantic_v1.Field()
+    """
+    Time when the run was created as ISO format
+    """
+
+    run_time_sec: int = pydantic_v1.Field()
+    """
+    Total run time in seconds
+    """
+
+    status: RecipeRunState = pydantic_v1.Field()
+    """
+    Status of the run
+    """
+
+    detail: str = pydantic_v1.Field()
+    """
+    Details about the status of the run as a human readable string
+    """
+
+    output: typing.Optional[LetterWriterPageOutput] = pydantic_v1.Field(default=None)
+    """
+    Output of the run. Only available if status is `"completed"`
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/level.py b/src/gooey/types/level.py
new file mode 100644
index 0000000..b87834f
--- /dev/null
+++ b/src/gooey/types/level.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+# Console/log severity level; the trailing typing.Any lets unknown (future)
+# server values validate instead of failing.
+Level = typing.Union[typing.Literal["log", "error"], typing.Any]
diff --git a/src/gooey/types/lipsync_page_output.py b/src/gooey/types/lipsync_page_output.py
new file mode 100644
index 0000000..1ca6ad1
--- /dev/null
+++ b/src/gooey/types/lipsync_page_output.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class LipsyncPageOutput(pydantic_v1.BaseModel):
+    """Output of a Lipsync recipe run: the generated video URL plus any function-call records."""
+
+    output_video: str
+    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/lipsync_page_request.py b/src/gooey/types/lipsync_page_request.py
new file mode 100644
index 0000000..904d761
--- /dev/null
+++ b/src/gooey/types/lipsync_page_request.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .sad_talker_settings import SadTalkerSettings
+
+
+class LipsyncPageRequest(pydantic_v1.BaseModel):
+    """Request body for a Lipsync recipe run (auto-generated model)."""
+
+    functions: typing.Optional[typing.List[RecipeFunction]] = None
+    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    Variables to be used as Jinja prompt templates and in functions as arguments
+    """
+
+    input_face: typing.Optional[str] = None
+    face_padding_top: typing.Optional[int] = None
+    face_padding_bottom: typing.Optional[int] = None
+    face_padding_left: typing.Optional[int] = None
+    face_padding_right: typing.Optional[int] = None
+    # Extra settings used only when selected_model is the SadTalker model — TODO confirm.
+    sadtalker_settings: typing.Optional[SadTalkerSettings] = None
+    selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = None
+    input_audio: typing.Optional[str] = None
+    settings: typing.Optional[RunSettings] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/lipsync_page_request_selected_model.py b/src/gooey/types/lipsync_page_request_selected_model.py
new file mode 100644
index 0000000..da68ef8
--- /dev/null
+++ b/src/gooey/types/lipsync_page_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+# Known lipsync model identifiers; the trailing typing.Any lets unknown
+# (future) server values validate instead of failing.
+LipsyncPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_page_response.py b/src/gooey/types/lipsync_page_response.py
new file mode 100644
index 0000000..4603289
--- /dev/null
+++ b/src/gooey/types/lipsync_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .lipsync_page_output import LipsyncPageOutput
+
+
+class LipsyncPageResponse(pydantic_v1.BaseModel):
+    """Completed-run response for the Lipsync recipe (auto-generated model)."""
+
+    id: str = pydantic_v1.Field()
+    """
+    Unique ID for this run
+    """
+
+    url: str = pydantic_v1.Field()
+    """
+    Web URL for this run
+    """
+
+    created_at: str = pydantic_v1.Field()
+    """
+    Time when the run was created as ISO format
+    """
+
+    output: LipsyncPageOutput = pydantic_v1.Field()
+    """
+    Output of the run
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/lipsync_page_status_response.py b/src/gooey/types/lipsync_page_status_response.py
new file mode 100644
index 0000000..8fe25b5
--- /dev/null
+++ b/src/gooey/types/lipsync_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .lipsync_page_output import LipsyncPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class LipsyncPageStatusResponse(pydantic_v1.BaseModel):
+    """Status-poll response for a Lipsync run; output is present only once completed."""
+
+    run_id: str = pydantic_v1.Field()
+    """
+    Unique ID for this run
+    """
+
+    web_url: str = pydantic_v1.Field()
+    """
+    Web URL for this run
+    """
+
+    created_at: str = pydantic_v1.Field()
+    """
+    Time when the run was created as ISO format
+    """
+
+    run_time_sec: int = pydantic_v1.Field()
+    """
+    Total run time in seconds
+    """
+
+    status: RecipeRunState = pydantic_v1.Field()
+    """
+    Status of the run
+    """
+
+    detail: str = pydantic_v1.Field()
+    """
+    Details about the status of the run as a human readable string
+    """
+
+    output: typing.Optional[LipsyncPageOutput] = pydantic_v1.Field(default=None)
+    """
+    Output of the run. Only available if status is `"completed"`
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        """Serialize to JSON using field aliases and dropping unset fields by default."""
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        """Serialize to a dict; a field is included if it was explicitly set or is non-None."""
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/lipsync_tts_page_output.py b/src/gooey/types/lipsync_tts_page_output.py
new file mode 100644
index 0000000..714d621
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_page_output.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class LipsyncTtsPageOutput(pydantic_v1.BaseModel):
+ audio_url: typing.Optional[str] = None
+ output_video: str
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py
new file mode 100644
index 0000000..04f5ce5
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_page_request.py
@@ -0,0 +1,72 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
+from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
+from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
+from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .sad_talker_settings import SadTalkerSettings
+
+
+class LipsyncTtsPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ text_prompt: str
+ tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = None
+ uberduck_voice_name: typing.Optional[str] = None
+ uberduck_speaking_rate: typing.Optional[float] = None
+ google_voice_name: typing.Optional[str] = None
+ google_speaking_rate: typing.Optional[float] = None
+ google_pitch: typing.Optional[float] = None
+ bark_history_prompt: typing.Optional[str] = None
+ elevenlabs_voice_name: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Use `elevenlabs_voice_id` instead
+ """
+
+ elevenlabs_api_key: typing.Optional[str] = None
+ elevenlabs_voice_id: typing.Optional[str] = None
+ elevenlabs_model: typing.Optional[str] = None
+ elevenlabs_stability: typing.Optional[float] = None
+ elevenlabs_similarity_boost: typing.Optional[float] = None
+ elevenlabs_style: typing.Optional[float] = None
+ elevenlabs_speaker_boost: typing.Optional[bool] = None
+ azure_voice_name: typing.Optional[str] = None
+ openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = None
+ openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = None
+ input_face: typing.Optional[str] = None
+ face_padding_top: typing.Optional[int] = None
+ face_padding_bottom: typing.Optional[int] = None
+ face_padding_left: typing.Optional[int] = None
+ face_padding_right: typing.Optional[int] = None
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None
+ selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py b/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py
new file mode 100644
index 0000000..453ab4a
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncTtsPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_page_request_openai_voice_name.py b/src/gooey/types/lipsync_tts_page_request_openai_voice_name.py
new file mode 100644
index 0000000..4873924
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_page_request_openai_voice_name.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncTtsPageRequestOpenaiVoiceName = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/gooey/types/lipsync_tts_page_request_selected_model.py b/src/gooey/types/lipsync_tts_page_request_selected_model.py
new file mode 100644
index 0000000..538058b
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_page_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncTtsPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_page_request_tts_provider.py b/src/gooey/types/lipsync_tts_page_request_tts_provider.py
new file mode 100644
index 0000000..7e73fda
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_page_request_tts_provider.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncTtsPageRequestTtsProvider = typing.Union[
+ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
+]
diff --git a/src/gooey/types/lipsync_tts_page_response.py b/src/gooey/types/lipsync_tts_page_response.py
new file mode 100644
index 0000000..f495cb1
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .lipsync_tts_page_output import LipsyncTtsPageOutput
+
+
+class LipsyncTtsPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ output: LipsyncTtsPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/lipsync_tts_page_status_response.py b/src/gooey/types/lipsync_tts_page_status_response.py
new file mode 100644
index 0000000..6ec9938
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .lipsync_tts_page_output import LipsyncTtsPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class LipsyncTtsPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run as a human readable string
+ """
+
+ output: typing.Optional[LipsyncTtsPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/llm_tools.py b/src/gooey/types/llm_tools.py
new file mode 100644
index 0000000..62edec8
--- /dev/null
+++ b/src/gooey/types/llm_tools.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LlmTools = typing.Literal["json_to_pdf"]
diff --git a/src/gooey/types/message_part.py b/src/gooey/types/message_part.py
new file mode 100644
index 0000000..6188cf7
--- /dev/null
+++ b/src/gooey/types/message_part.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .reply_button import ReplyButton
+
+
+class MessagePart(pydantic_v1.BaseModel):
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run as a human readable string
+ """
+
+ text: typing.Optional[str] = None
+ audio: typing.Optional[str] = None
+ video: typing.Optional[str] = None
+ buttons: typing.Optional[typing.List[ReplyButton]] = None
+ documents: typing.Optional[typing.List[str]] = None
+ type: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ The partial outputs from the bot will be streamed in parts. Use this to update the user interface iteratively.
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/object_inpainting_page_output.py b/src/gooey/types/object_inpainting_page_output.py
new file mode 100644
index 0000000..9dbdaa7
--- /dev/null
+++ b/src/gooey/types/object_inpainting_page_output.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class ObjectInpaintingPageOutput(pydantic_v1.BaseModel):
+ resized_image: str
+ obj_mask: str
+ output_images: typing.List[str]
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/object_inpainting_page_request.py b/src/gooey/types/object_inpainting_page_request.py
new file mode 100644
index 0000000..cf632bb
--- /dev/null
+++ b/src/gooey/types/object_inpainting_page_request.py
@@ -0,0 +1,55 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class ObjectInpaintingPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: str
+ text_prompt: str
+ obj_scale: typing.Optional[float] = None
+ obj_pos_x: typing.Optional[float] = None
+ obj_pos_y: typing.Optional[float] = None
+ mask_threshold: typing.Optional[float] = None
+ selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = None
+ negative_prompt: typing.Optional[str] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ sd2upscaling: typing.Optional[bool] = pydantic_v1.Field(alias="sd_2_upscaling", default=None)
+ seed: typing.Optional[int] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/object_inpainting_page_request_selected_model.py b/src/gooey/types/object_inpainting_page_request_selected_model.py
new file mode 100644
index 0000000..92f1302
--- /dev/null
+++ b/src/gooey/types/object_inpainting_page_request_selected_model.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ObjectInpaintingPageRequestSelectedModel = typing.Union[
+ typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any
+]
diff --git a/src/gooey/types/object_inpainting_page_response.py b/src/gooey/types/object_inpainting_page_response.py
new file mode 100644
index 0000000..ff54304
--- /dev/null
+++ b/src/gooey/types/object_inpainting_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .object_inpainting_page_output import ObjectInpaintingPageOutput
+
+
+class ObjectInpaintingPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ output: ObjectInpaintingPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/object_inpainting_page_status_response.py b/src/gooey/types/object_inpainting_page_status_response.py
new file mode 100644
index 0000000..027b00e
--- /dev/null
+++ b/src/gooey/types/object_inpainting_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .object_inpainting_page_output import ObjectInpaintingPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class ObjectInpaintingPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run as a human readable string
+ """
+
+ output: typing.Optional[ObjectInpaintingPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/preprocess.py b/src/gooey/types/preprocess.py
new file mode 100644
index 0000000..6189430
--- /dev/null
+++ b/src/gooey/types/preprocess.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+Preprocess = typing.Union[typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any]
diff --git a/src/gooey/types/prompt.py b/src/gooey/types/prompt.py
new file mode 100644
index 0000000..1375bae
--- /dev/null
+++ b/src/gooey/types/prompt.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .conversation_entry import ConversationEntry
+
+Prompt = typing.Union[str, typing.List[ConversationEntry]]
diff --git a/src/gooey/types/prompt_tree_node.py b/src/gooey/types/prompt_tree_node.py
new file mode 100644
index 0000000..eb68bba
--- /dev/null
+++ b/src/gooey/types/prompt_tree_node.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .prompt import Prompt
+
+
+class PromptTreeNode(pydantic_v1.BaseModel):
+ prompt: Prompt
+ children: typing.List[PromptTreeNode]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
+
+
+PromptTreeNode.update_forward_refs()
diff --git a/src/gooey/types/qr_code_generator_page_output.py b/src/gooey/types/qr_code_generator_page_output.py
new file mode 100644
index 0000000..169472a
--- /dev/null
+++ b/src/gooey/types/qr_code_generator_page_output.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class QrCodeGeneratorPageOutput(pydantic_v1.BaseModel):
+ output_images: typing.List[str]
+ raw_images: typing.List[str]
+ shortened_url: typing.Optional[str] = None
+ cleaned_qr_code: str
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/qr_code_generator_page_request.py b/src/gooey/types/qr_code_generator_page_request.py
new file mode 100644
index 0000000..543e7a5
--- /dev/null
+++ b/src/gooey/types/qr_code_generator_page_request.py
@@ -0,0 +1,76 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
+ QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
+)
+from .qr_code_generator_page_request_selected_controlnet_model_item import (
+ QrCodeGeneratorPageRequestSelectedControlnetModelItem,
+)
+from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .scheduler import Scheduler
+from .vcard import Vcard
+
+
+class QrCodeGeneratorPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ qr_code_data: typing.Optional[str] = None
+ qr_code_input_image: typing.Optional[str] = None
+ qr_code_vcard: typing.Optional[Vcard] = None
+ qr_code_file: typing.Optional[str] = None
+ use_url_shortener: typing.Optional[bool] = None
+ text_prompt: str
+ negative_prompt: typing.Optional[str] = None
+ image_prompt: typing.Optional[str] = None
+ image_prompt_controlnet_models: typing.Optional[
+ typing.List[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+ ] = None
+ image_prompt_strength: typing.Optional[float] = None
+ image_prompt_scale: typing.Optional[float] = None
+ image_prompt_pos_x: typing.Optional[float] = None
+ image_prompt_pos_y: typing.Optional[float] = None
+ selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = None
+ selected_controlnet_model: typing.Optional[
+ typing.List[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+ ] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ scheduler: typing.Optional[Scheduler] = None
+ seed: typing.Optional[int] = None
+ obj_scale: typing.Optional[float] = None
+ obj_pos_x: typing.Optional[float] = None
+ obj_pos_y: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py
new file mode 100644
index 0000000..508e7e9
--- /dev/null
+++ b/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+QrCodeGeneratorPageRequestImagePromptControlnetModelsItem = typing.Union[
+ typing.Literal[
+ "sd_controlnet_canny",
+ "sd_controlnet_depth",
+ "sd_controlnet_hed",
+ "sd_controlnet_mlsd",
+ "sd_controlnet_normal",
+ "sd_controlnet_openpose",
+ "sd_controlnet_scribble",
+ "sd_controlnet_seg",
+ "sd_controlnet_tile",
+ "sd_controlnet_brightness",
+ "control_v1p_sd15_qrcode_monster_v2",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py b/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py
new file mode 100644
index 0000000..c6f1967
--- /dev/null
+++ b/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+QrCodeGeneratorPageRequestSelectedControlnetModelItem = typing.Union[
+ typing.Literal[
+ "sd_controlnet_canny",
+ "sd_controlnet_depth",
+ "sd_controlnet_hed",
+ "sd_controlnet_mlsd",
+ "sd_controlnet_normal",
+ "sd_controlnet_openpose",
+ "sd_controlnet_scribble",
+ "sd_controlnet_seg",
+ "sd_controlnet_tile",
+ "sd_controlnet_brightness",
+ "control_v1p_sd15_qrcode_monster_v2",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/qr_code_generator_page_request_selected_model.py b/src/gooey/types/qr_code_generator_page_request_selected_model.py
new file mode 100644
index 0000000..97282cb
--- /dev/null
+++ b/src/gooey/types/qr_code_generator_page_request_selected_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+QrCodeGeneratorPageRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "dream_shaper",
+ "dreamlike_2",
+ "sd_2",
+ "sd_1_5",
+ "dall_e",
+ "dall_e_3",
+ "openjourney_2",
+ "openjourney",
+ "analog_diffusion",
+ "protogen_5_3",
+ "jack_qiao",
+ "rodent_diffusion_1_5",
+ "deepfloyd_if",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/qr_code_generator_page_response.py b/src/gooey/types/qr_code_generator_page_response.py
new file mode 100644
index 0000000..10162d1
--- /dev/null
+++ b/src/gooey/types/qr_code_generator_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .qr_code_generator_page_output import QrCodeGeneratorPageOutput
+
+
+class QrCodeGeneratorPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ output: QrCodeGeneratorPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/qr_code_generator_page_status_response.py b/src/gooey/types/qr_code_generator_page_status_response.py
new file mode 100644
index 0000000..7d013d6
--- /dev/null
+++ b/src/gooey/types/qr_code_generator_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .qr_code_generator_page_output import QrCodeGeneratorPageOutput
+from .recipe_run_state import RecipeRunState
+
+
+class QrCodeGeneratorPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created in ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+    Details about the status of the run as a human-readable string
+ """
+
+ output: typing.Optional[QrCodeGeneratorPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/recipe_function.py b/src/gooey/types/recipe_function.py
new file mode 100644
index 0000000..0615d4e
--- /dev/null
+++ b/src/gooey/types/recipe_function.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .trigger import Trigger
+
+
+class RecipeFunction(pydantic_v1.BaseModel):
+ url: str = pydantic_v1.Field()
+ """
+ The URL of the [function](https://gooey.ai/functions) to call.
+ """
+
+ trigger: Trigger = pydantic_v1.Field()
+ """
+ When to run this function. `pre` runs before the recipe, `post` runs after the recipe.
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/recipe_run_state.py b/src/gooey/types/recipe_run_state.py
new file mode 100644
index 0000000..9aaeaaf
--- /dev/null
+++ b/src/gooey/types/recipe_run_state.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RecipeRunState = typing.Union[typing.Literal["starting", "running", "completed", "failed"], typing.Any]
diff --git a/src/gooey/types/related_doc_search_response.py b/src/gooey/types/related_doc_search_response.py
new file mode 100644
index 0000000..7eea58e
--- /dev/null
+++ b/src/gooey/types/related_doc_search_response.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .search_reference import SearchReference
+
+
+class RelatedDocSearchResponse(pydantic_v1.BaseModel):
+ output_text: typing.List[str]
+ references: typing.List[SearchReference]
+ final_prompt: str
+ final_search_query: typing.Optional[str] = None
+ search_query: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/related_google_gpt_response.py b/src/gooey/types/related_google_gpt_response.py
new file mode 100644
index 0000000..456d31c
--- /dev/null
+++ b/src/gooey/types/related_google_gpt_response.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .search_reference import SearchReference
+
+
+class RelatedGoogleGptResponse(pydantic_v1.BaseModel):
+ output_text: typing.List[str]
+ serp_results: typing.Dict[str, typing.Any]
+ references: typing.List[SearchReference]
+ final_prompt: str
+ final_search_query: typing.Optional[str] = None
+ search_query: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/related_qn_a_doc_page_output.py b/src/gooey/types/related_qn_a_doc_page_output.py
new file mode 100644
index 0000000..f680686
--- /dev/null
+++ b/src/gooey/types/related_qn_a_doc_page_output.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+from .related_doc_search_response import RelatedDocSearchResponse
+
+
+class RelatedQnADocPageOutput(pydantic_v1.BaseModel):
+ output_queries: typing.List[RelatedDocSearchResponse]
+ serp_results: typing.Dict[str, typing.Any]
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/related_qn_a_doc_page_request.py b/src/gooey/types/related_qn_a_doc_page_request.py
new file mode 100644
index 0000000..26b7871
--- /dev/null
+++ b/src/gooey/types/related_qn_a_doc_page_request.py
@@ -0,0 +1,78 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_function import RecipeFunction
+from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
+from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel
+from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
+from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel
+from .run_settings import RunSettings
+from .serp_search_location import SerpSearchLocation
+from .serp_search_type import SerpSearchType
+
+
+class RelatedQnADocPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ search_query: str
+ keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = None
+ documents: typing.Optional[typing.List[str]] = None
+ max_references: typing.Optional[int] = None
+ max_context_words: typing.Optional[int] = None
+ scroll_jump: typing.Optional[int] = None
+ doc_extract_url: typing.Optional[str] = None
+ embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = None
+ dense_weight: typing.Optional[float] = pydantic_v1.Field(default=None)
+ """
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+ """
+
+ task_instructions: typing.Optional[str] = None
+ query_instructions: typing.Optional[str] = None
+ selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = None
+ serp_search_location: typing.Optional[SerpSearchLocation] = None
+ scaleserp_locations: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_location` instead
+ """
+
+ serp_search_type: typing.Optional[SerpSearchType] = None
+ scaleserp_search_field: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_type` instead
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/related_qn_a_doc_page_request_citation_style.py b/src/gooey/types/related_qn_a_doc_page_request_citation_style.py
new file mode 100644
index 0000000..b98f002
--- /dev/null
+++ b/src/gooey/types/related_qn_a_doc_page_request_citation_style.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RelatedQnADocPageRequestCitationStyle = typing.Union[
+ typing.Literal[
+ "number",
+ "title",
+ "url",
+ "symbol",
+ "markdown",
+ "html",
+ "slack_mrkdwn",
+ "plaintext",
+ "number_markdown",
+ "number_html",
+ "number_slack_mrkdwn",
+ "number_plaintext",
+ "symbol_markdown",
+ "symbol_html",
+ "symbol_slack_mrkdwn",
+ "symbol_plaintext",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py b/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py
new file mode 100644
index 0000000..680bbb5
--- /dev/null
+++ b/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RelatedQnADocPageRequestEmbeddingModel = typing.Union[
+ typing.Literal[
+ "openai_3_large",
+ "openai_3_small",
+ "openai_ada_2",
+ "e5_large_v2",
+ "e5_base_v2",
+ "multilingual_e5_base",
+ "multilingual_e5_large",
+ "gte_large",
+ "gte_base",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/related_qn_a_doc_page_request_keyword_query.py b/src/gooey/types/related_qn_a_doc_page_request_keyword_query.py
new file mode 100644
index 0000000..4f35322
--- /dev/null
+++ b/src/gooey/types/related_qn_a_doc_page_request_keyword_query.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RelatedQnADocPageRequestKeywordQuery = typing.Union[str, typing.List[str]]
diff --git a/src/gooey/types/related_qn_a_doc_page_request_selected_model.py b/src/gooey/types/related_qn_a_doc_page_request_selected_model.py
new file mode 100644
index 0000000..b642574
--- /dev/null
+++ b/src/gooey/types/related_qn_a_doc_page_request_selected_model.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RelatedQnADocPageRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama3_8b",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_7b_it",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/related_qn_a_doc_page_response.py b/src/gooey/types/related_qn_a_doc_page_response.py
new file mode 100644
index 0000000..3fa6b0c
--- /dev/null
+++ b/src/gooey/types/related_qn_a_doc_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .related_qn_a_doc_page_output import RelatedQnADocPageOutput
+
+
+class RelatedQnADocPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created in ISO format
+ """
+
+ output: RelatedQnADocPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/related_qn_a_doc_page_status_response.py b/src/gooey/types/related_qn_a_doc_page_status_response.py
new file mode 100644
index 0000000..6fa66e2
--- /dev/null
+++ b/src/gooey/types/related_qn_a_doc_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .related_qn_a_doc_page_output import RelatedQnADocPageOutput
+
+
+class RelatedQnADocPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created in ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+    Details about the status of the run as a human-readable string
+ """
+
+ output: typing.Optional[RelatedQnADocPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/related_qn_a_page_output.py b/src/gooey/types/related_qn_a_page_output.py
new file mode 100644
index 0000000..d063410
--- /dev/null
+++ b/src/gooey/types/related_qn_a_page_output.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+from .related_google_gpt_response import RelatedGoogleGptResponse
+
+
+class RelatedQnAPageOutput(pydantic_v1.BaseModel):
+ output_queries: typing.List[RelatedGoogleGptResponse]
+ serp_results: typing.Dict[str, typing.Any]
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/related_qn_a_page_request.py b/src/gooey/types/related_qn_a_page_request.py
new file mode 100644
index 0000000..1d41492
--- /dev/null
+++ b/src/gooey/types/related_qn_a_page_request.py
@@ -0,0 +1,74 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_function import RecipeFunction
+from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
+from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel
+from .run_settings import RunSettings
+from .serp_search_location import SerpSearchLocation
+from .serp_search_type import SerpSearchType
+
+
+class RelatedQnAPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ serp_search_location: typing.Optional[SerpSearchLocation] = None
+ scaleserp_locations: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_location` instead
+ """
+
+ serp_search_type: typing.Optional[SerpSearchType] = None
+ scaleserp_search_field: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ DEPRECATED: use `serp_search_type` instead
+ """
+
+ search_query: str
+ site_filter: str
+ task_instructions: typing.Optional[str] = None
+ query_instructions: typing.Optional[str] = None
+ selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ max_search_urls: typing.Optional[int] = None
+ max_references: typing.Optional[int] = None
+ max_context_words: typing.Optional[int] = None
+ scroll_jump: typing.Optional[int] = None
+ embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = None
+ dense_weight: typing.Optional[float] = pydantic_v1.Field(default=None)
+ """
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/related_qn_a_page_request_embedding_model.py b/src/gooey/types/related_qn_a_page_request_embedding_model.py
new file mode 100644
index 0000000..a591920
--- /dev/null
+++ b/src/gooey/types/related_qn_a_page_request_embedding_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RelatedQnAPageRequestEmbeddingModel = typing.Union[
+ typing.Literal[
+ "openai_3_large",
+ "openai_3_small",
+ "openai_ada_2",
+ "e5_large_v2",
+ "e5_base_v2",
+ "multilingual_e5_base",
+ "multilingual_e5_large",
+ "gte_large",
+ "gte_base",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/related_qn_a_page_request_selected_model.py b/src/gooey/types/related_qn_a_page_request_selected_model.py
new file mode 100644
index 0000000..72c52e8
--- /dev/null
+++ b/src/gooey/types/related_qn_a_page_request_selected_model.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RelatedQnAPageRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama3_8b",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_7b_it",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/related_qn_a_page_response.py b/src/gooey/types/related_qn_a_page_response.py
new file mode 100644
index 0000000..f73e33b
--- /dev/null
+++ b/src/gooey/types/related_qn_a_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .related_qn_a_page_output import RelatedQnAPageOutput
+
+
+class RelatedQnAPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created in ISO format
+ """
+
+ output: RelatedQnAPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/related_qn_a_page_status_response.py b/src/gooey/types/related_qn_a_page_status_response.py
new file mode 100644
index 0000000..ef2209a
--- /dev/null
+++ b/src/gooey/types/related_qn_a_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .related_qn_a_page_output import RelatedQnAPageOutput
+
+
+class RelatedQnAPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created in ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+    Details about the status of the run as a human-readable string
+ """
+
+ output: typing.Optional[RelatedQnAPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/reply_button.py b/src/gooey/types/reply_button.py
new file mode 100644
index 0000000..530a529
--- /dev/null
+++ b/src/gooey/types/reply_button.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class ReplyButton(pydantic_v1.BaseModel):
+ id: str
+ title: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/response_model.py b/src/gooey/types/response_model.py
new file mode 100644
index 0000000..59c0c0b
--- /dev/null
+++ b/src/gooey/types/response_model.py
@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .reply_button import ReplyButton
+from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery
+from .response_model_final_prompt import ResponseModelFinalPrompt
+from .search_reference import SearchReference
+
+
+class ResponseModel(pydantic_v1.BaseModel):
+ final_prompt: typing.Optional[ResponseModelFinalPrompt] = None
+ output_text: typing.Optional[typing.List[str]] = None
+ output_audio: typing.Optional[typing.List[str]] = None
+ output_video: typing.Optional[typing.List[str]] = None
+ raw_input_text: typing.Optional[str] = None
+ raw_tts_text: typing.Optional[typing.List[str]] = None
+ raw_output_text: typing.Optional[typing.List[str]] = None
+ references: typing.Optional[typing.List[SearchReference]] = None
+ final_search_query: typing.Optional[str] = None
+ final_keyword_query: typing.Optional[ResponseModelFinalKeywordQuery] = None
+ output_documents: typing.Optional[typing.List[str]] = None
+ reply_buttons: typing.Optional[typing.List[ReplyButton]] = None
+ finish_reason: typing.Optional[typing.List[str]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/response_model_final_keyword_query.py b/src/gooey/types/response_model_final_keyword_query.py
new file mode 100644
index 0000000..4cf1436
--- /dev/null
+++ b/src/gooey/types/response_model_final_keyword_query.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Final keyword query used for retrieval: either a single query string or a list of strings.
ResponseModelFinalKeywordQuery = typing.Union[str, typing.List[str]]
diff --git a/src/gooey/types/response_model_final_prompt.py b/src/gooey/types/response_model_final_prompt.py
new file mode 100644
index 0000000..99bf39f
--- /dev/null
+++ b/src/gooey/types/response_model_final_prompt.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .conversation_entry import ConversationEntry
+
# Final LLM prompt: either a plain string or a structured list of chat messages.
ResponseModelFinalPrompt = typing.Union[str, typing.List[ConversationEntry]]
diff --git a/src/gooey/types/role.py b/src/gooey/types/role.py
new file mode 100644
index 0000000..f300df5
--- /dev/null
+++ b/src/gooey/types/role.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Chat message author role; typing.Any admits values outside the known literals.
Role = typing.Union[typing.Literal["user", "system", "assistant"], typing.Any]
diff --git a/src/gooey/types/run_settings.py b/src/gooey/types/run_settings.py
new file mode 100644
index 0000000..182c1cf
--- /dev/null
+++ b/src/gooey/types/run_settings.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .run_settings_retention_policy import RunSettingsRetentionPolicy
+
+
class RunSettings(pydantic_v1.BaseModel):
    """Per-run settings; currently just the data-retention policy."""

    retention_policy: typing.Optional[RunSettingsRetentionPolicy] = pydantic_v1.Field(default=None)
    """
    Policy for retaining the run data.
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs override)."""
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict as the deep union of the exclude_unset and exclude_none renderings."""
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/run_settings_retention_policy.py b/src/gooey/types/run_settings_retention_policy.py
new file mode 100644
index 0000000..45ceff1
--- /dev/null
+++ b/src/gooey/types/run_settings_retention_policy.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Retention policy for run data: "keep" or "delete"; typing.Any admits other values.
RunSettingsRetentionPolicy = typing.Union[typing.Literal["keep", "delete"], typing.Any]
diff --git a/src/gooey/types/run_start.py b/src/gooey/types/run_start.py
new file mode 100644
index 0000000..947f644
--- /dev/null
+++ b/src/gooey/types/run_start.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class RunStart(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ status_url: str = pydantic_v1.Field()
+ """
+ URL to check the status of the run. Also included in the `Location` header of the response.
+ """
+
+ type: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ The run was started. Save the IDs for future requests.Use the `status_url` to check the status of the run and fetch the complete output.
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/sad_talker_settings.py b/src/gooey/types/sad_talker_settings.py
new file mode 100644
index 0000000..5a1d758
--- /dev/null
+++ b/src/gooey/types/sad_talker_settings.py
@@ -0,0 +1,58 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .preprocess import Preprocess
+
+
class SadTalkerSettings(pydantic_v1.BaseModel):
    """Settings for the SadTalker lipsync/animation model."""

    # Whether to keep the head still; semantics beyond the flag itself are not
    # visible here — see SadTalker docs to confirm.
    still: typing.Optional[bool] = None
    preprocess: typing.Optional[Preprocess] = pydantic_v1.Field(default=None)
    """
    SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping.
    """

    pose_style: typing.Optional[int] = pydantic_v1.Field(default=None)
    """
    Random seed 0-45 inclusive that affects how the pose is animated.
    """

    expression_scale: typing.Optional[float] = pydantic_v1.Field(default=None)
    """
    Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot.
    """

    ref_eyeblink: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    Optional reference video for eyeblinks to make the eyebrow movement more natural.
    """

    ref_pose: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    Optional reference video to pose the head.
    """

    # Head-rotation angle sequences — presumably per-frame values; units not
    # documented here, TODO confirm against the SadTalker API.
    input_yaw: typing.Optional[typing.List[int]] = None
    input_pitch: typing.Optional[typing.List[int]] = None
    input_roll: typing.Optional[typing.List[int]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs override)."""
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict as the deep union of the exclude_unset and exclude_none renderings."""
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/scheduler.py b/src/gooey/types/scheduler.py
new file mode 100644
index 0000000..b18d974
--- /dev/null
+++ b/src/gooey/types/scheduler.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Sampler/scheduler identifier (names match common diffusion schedulers — DPM,
# Euler, DDIM, etc.); typing.Any admits values outside the known set.
Scheduler = typing.Union[
    typing.Literal[
        "singlestep_dpm_solver",
        "multistep_dpm_solver",
        "dpm_sde",
        "dpm_discrete",
        "dpm_discrete_ancestral",
        "unipc",
        "lms_discrete",
        "heun",
        "euler",
        "euler_ancestral",
        "pndm",
        "ddpm",
        "ddim",
        "deis",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/search_reference.py b/src/gooey/types/search_reference.py
new file mode 100644
index 0000000..73d4377
--- /dev/null
+++ b/src/gooey/types/search_reference.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
class SearchReference(pydantic_v1.BaseModel):
    """A single search-result citation: source URL, title, text snippet, and relevance score."""

    url: str
    title: str
    snippet: str
    score: float

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs win)."""
        merged: typing.Any = {"by_alias": True, "exclude_unset": True}
        merged.update(kwargs)
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict: deep union of the exclude_unset and exclude_none renderings."""
        unset_variant: typing.Any = {"by_alias": True, "exclude_unset": True}
        unset_variant.update(kwargs)
        none_variant: typing.Any = {"by_alias": True, "exclude_none": True}
        none_variant.update(kwargs)
        return deep_union_pydantic_dicts(
            super().dict(**unset_variant), super().dict(**none_variant)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/seo_summary_page_output.py b/src/gooey/types/seo_summary_page_output.py
new file mode 100644
index 0000000..d99f0d8
--- /dev/null
+++ b/src/gooey/types/seo_summary_page_output.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
class SeoSummaryPageOutput(pydantic_v1.BaseModel):
    """Output of an SEO-summary run: the generated content plus the SERP data,
    search URLs, and final prompt used to produce it.
    """

    output_content: typing.List[str]
    serp_results: typing.Dict[str, typing.Any]
    search_urls: typing.List[str]
    summarized_urls: typing.List[typing.Dict[str, typing.Any]]
    final_prompt: str
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs override)."""
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict as the deep union of the exclude_unset and exclude_none renderings."""
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/seo_summary_page_request.py b/src/gooey/types/seo_summary_page_request.py
new file mode 100644
index 0000000..1f85d7a
--- /dev/null
+++ b/src/gooey/types/seo_summary_page_request.py
@@ -0,0 +1,60 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .run_settings import RunSettings
+from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel
+from .serp_search_location import SerpSearchLocation
+from .serp_search_type import SerpSearchType
+
+
class SeoSummaryPageRequest(pydantic_v1.BaseModel):
    """Request body for the SEO-summary recipe: search/keyword inputs, LLM model
    and sampling options, and shared run settings. Only `search_query`,
    `keywords`, `title`, and `company_url` are required.
    """

    serp_search_location: typing.Optional[SerpSearchLocation] = None
    scaleserp_locations: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None)
    """
    DEPRECATED: use `serp_search_location` instead
    """

    serp_search_type: typing.Optional[SerpSearchType] = None
    scaleserp_search_field: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    DEPRECATED: use `serp_search_type` instead
    """

    search_query: str
    keywords: str
    title: str
    company_url: str
    task_instructions: typing.Optional[str] = None
    enable_html: typing.Optional[bool] = None
    selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = None
    sampling_temperature: typing.Optional[float] = None
    max_tokens: typing.Optional[int] = None
    num_outputs: typing.Optional[int] = None
    quality: typing.Optional[float] = None
    avoid_repetition: typing.Optional[bool] = None
    max_search_urls: typing.Optional[int] = None
    enable_crosslinks: typing.Optional[bool] = None
    seed: typing.Optional[int] = None
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs override)."""
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict as the deep union of the exclude_unset and exclude_none renderings."""
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/seo_summary_page_request_selected_model.py b/src/gooey/types/seo_summary_page_request_selected_model.py
new file mode 100644
index 0000000..dd97fe4
--- /dev/null
+++ b/src/gooey/types/seo_summary_page_request_selected_model.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# LLM selection for the SEO-summary recipe; typing.Any admits model names
# outside the known set (so newer models don't break validation).
SeoSummaryPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "gpt_4_o",
        "gpt_4_turbo_vision",
        "gpt_4_vision",
        "gpt_4_turbo",
        "gpt_4",
        "gpt_4_32k",
        "gpt_3_5_turbo",
        "gpt_3_5_turbo_16k",
        "gpt_3_5_turbo_instruct",
        "llama3_70b",
        "llama3_8b",
        "llama2_70b_chat",
        "mixtral_8x7b_instruct_0_1",
        "gemma_7b_it",
        "gemini_1_5_pro",
        "gemini_1_pro_vision",
        "gemini_1_pro",
        "palm2_chat",
        "palm2_text",
        "claude_3_5_sonnet",
        "claude_3_opus",
        "claude_3_sonnet",
        "claude_3_haiku",
        "sea_lion_7b_instruct",
        "text_davinci_003",
        "text_davinci_002",
        "code_davinci_002",
        "text_curie_001",
        "text_babbage_001",
        "text_ada_001",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/seo_summary_page_response.py b/src/gooey/types/seo_summary_page_response.py
new file mode 100644
index 0000000..f9ad476
--- /dev/null
+++ b/src/gooey/types/seo_summary_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .seo_summary_page_output import SeoSummaryPageOutput
+
+
class SeoSummaryPageResponse(pydantic_v1.BaseModel):
    """Completed-run response for the SEO-summary recipe: run identifiers plus the output."""

    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: SeoSummaryPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs override)."""
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict as the deep union of the exclude_unset and exclude_none renderings."""
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/seo_summary_page_status_response.py b/src/gooey/types/seo_summary_page_status_response.py
new file mode 100644
index 0000000..6e5ddad
--- /dev/null
+++ b/src/gooey/types/seo_summary_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .seo_summary_page_output import SeoSummaryPageOutput
+
+
class SeoSummaryPageStatusResponse(pydantic_v1.BaseModel):
    """Polling response for an SEO-summary run: identifiers, current state,
    and — once the status is `"completed"` — the output.
    """

    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[SeoSummaryPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs override)."""
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict as the deep union of the exclude_unset and exclude_none renderings."""
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/serp_search_location.py b/src/gooey/types/serp_search_location.py
new file mode 100644
index 0000000..cdabfea
--- /dev/null
+++ b/src/gooey/types/serp_search_location.py
@@ -0,0 +1,248 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# Two-letter country code for the SERP search location (mostly ISO 3166-1
# alpha-2; a few legacy codes like "an" appear — confirm against the SERP
# provider's list). typing.Any admits codes outside the known set.
SerpSearchLocation = typing.Union[
    typing.Literal[
        "af",
        "al",
        "dz",
        "as",
        "ad",
        "ao",
        "ai",
        "aq",
        "ag",
        "ar",
        "am",
        "aw",
        "au",
        "at",
        "az",
        "bs",
        "bh",
        "bd",
        "bb",
        "by",
        "be",
        "bz",
        "bj",
        "bm",
        "bt",
        "bo",
        "ba",
        "bw",
        "bv",
        "br",
        "io",
        "bn",
        "bg",
        "bf",
        "bi",
        "kh",
        "cm",
        "ca",
        "cv",
        "ky",
        "cf",
        "td",
        "cl",
        "cn",
        "cx",
        "cc",
        "co",
        "km",
        "cg",
        "cd",
        "ck",
        "cr",
        "ci",
        "hr",
        "cu",
        "cy",
        "cz",
        "dk",
        "dj",
        "dm",
        "do",
        "ec",
        "eg",
        "sv",
        "gq",
        "er",
        "ee",
        "et",
        "fk",
        "fo",
        "fj",
        "fi",
        "fr",
        "gf",
        "pf",
        "tf",
        "ga",
        "gm",
        "ge",
        "de",
        "gh",
        "gi",
        "gr",
        "gl",
        "gd",
        "gp",
        "gu",
        "gt",
        "gn",
        "gw",
        "gy",
        "ht",
        "hm",
        "va",
        "hn",
        "hk",
        "hu",
        "is",
        "in",
        "id",
        "ir",
        "iq",
        "ie",
        "il",
        "it",
        "jm",
        "jp",
        "jo",
        "kz",
        "ke",
        "ki",
        "kp",
        "kr",
        "kw",
        "kg",
        "la",
        "lv",
        "lb",
        "ls",
        "lr",
        "ly",
        "li",
        "lt",
        "lu",
        "mo",
        "mk",
        "mg",
        "mw",
        "my",
        "mv",
        "ml",
        "mt",
        "mh",
        "mq",
        "mr",
        "mu",
        "yt",
        "mx",
        "fm",
        "md",
        "mc",
        "mn",
        "ms",
        "ma",
        "mz",
        "mm",
        "na",
        "nr",
        "np",
        "nl",
        "an",
        "nc",
        "nz",
        "ni",
        "ne",
        "ng",
        "nu",
        "nf",
        "mp",
        "no",
        "om",
        "pk",
        "pw",
        "ps",
        "pa",
        "pg",
        "py",
        "pe",
        "ph",
        "pn",
        "pl",
        "pt",
        "pr",
        "qa",
        "re",
        "ro",
        "ru",
        "rw",
        "sh",
        "kn",
        "lc",
        "pm",
        "vc",
        "ws",
        "sm",
        "st",
        "sa",
        "sn",
        "rs",
        "sc",
        "sl",
        "sg",
        "sk",
        "si",
        "sb",
        "so",
        "za",
        "gs",
        "es",
        "lk",
        "sd",
        "sr",
        "sj",
        "sz",
        "se",
        "ch",
        "sy",
        "tw",
        "tj",
        "tz",
        "th",
        "tl",
        "tg",
        "tk",
        "to",
        "tt",
        "tn",
        "tr",
        "tm",
        "tc",
        "tv",
        "ug",
        "ua",
        "ae",
        "gb",
        "us",
        "um",
        "uy",
        "uz",
        "vu",
        "ve",
        "vn",
        "vg",
        "vi",
        "wf",
        "eh",
        "ye",
        "zm",
        "zw",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/serp_search_type.py b/src/gooey/types/serp_search_type.py
new file mode 100644
index 0000000..ff2e8d1
--- /dev/null
+++ b/src/gooey/types/serp_search_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# SERP result category to search; typing.Any admits values outside the known set.
SerpSearchType = typing.Union[typing.Literal["search", "images", "videos", "places", "news"], typing.Any]
diff --git a/src/gooey/types/smart_gpt_page_output.py b/src/gooey/types/smart_gpt_page_output.py
new file mode 100644
index 0000000..bbf4f24
--- /dev/null
+++ b/src/gooey/types/smart_gpt_page_output.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+from .prompt_tree_node import PromptTreeNode
+
+
class SmartGptPageOutput(pydantic_v1.BaseModel):
    """Output fields for a SmartGPT run: final texts plus the intermediate prompt tree."""

    output_text: typing.List[str]
    prompt_tree: typing.Optional[typing.List[PromptTreeNode]] = None
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs win)."""
        merged: typing.Any = {"by_alias": True, "exclude_unset": True}
        merged.update(kwargs)
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict: deep union of the exclude_unset and exclude_none renderings."""
        unset_variant: typing.Any = {"by_alias": True, "exclude_unset": True}
        unset_variant.update(kwargs)
        none_variant: typing.Any = {"by_alias": True, "exclude_none": True}
        none_variant.update(kwargs)
        return deep_union_pydantic_dicts(
            super().dict(**unset_variant), super().dict(**none_variant)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/smart_gpt_page_request.py b/src/gooey/types/smart_gpt_page_request.py
new file mode 100644
index 0000000..267f0a7
--- /dev/null
+++ b/src/gooey/types/smart_gpt_page_request.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
+
+
class SmartGptPageRequest(pydantic_v1.BaseModel):
    """Request body for the SmartGPT recipe: input prompt, the CoT/reflexion/DERA
    prompt stages, LLM model and sampling options, and shared run settings.
    Only `input_prompt` is required.
    """

    functions: typing.Optional[typing.List[RecipeFunction]] = None
    variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
    """
    Variables to be used as Jinja prompt templates and in functions as arguments
    """

    input_prompt: str
    cot_prompt: typing.Optional[str] = None
    reflexion_prompt: typing.Optional[str] = None
    dera_prompt: typing.Optional[str] = None
    selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = None
    avoid_repetition: typing.Optional[bool] = None
    num_outputs: typing.Optional[int] = None
    quality: typing.Optional[float] = None
    max_tokens: typing.Optional[int] = None
    sampling_temperature: typing.Optional[float] = None
    settings: typing.Optional[RunSettings] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs override)."""
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict as the deep union of the exclude_unset and exclude_none renderings."""
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/smart_gpt_page_request_selected_model.py b/src/gooey/types/smart_gpt_page_request_selected_model.py
new file mode 100644
index 0000000..f5868c7
--- /dev/null
+++ b/src/gooey/types/smart_gpt_page_request_selected_model.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
# LLM selection for the SmartGPT recipe; typing.Any admits model names outside
# the known set (so newer models don't break validation).
SmartGptPageRequestSelectedModel = typing.Union[
    typing.Literal[
        "gpt_4_o",
        "gpt_4_turbo_vision",
        "gpt_4_vision",
        "gpt_4_turbo",
        "gpt_4",
        "gpt_4_32k",
        "gpt_3_5_turbo",
        "gpt_3_5_turbo_16k",
        "gpt_3_5_turbo_instruct",
        "llama3_70b",
        "llama3_8b",
        "llama2_70b_chat",
        "mixtral_8x7b_instruct_0_1",
        "gemma_7b_it",
        "gemini_1_5_pro",
        "gemini_1_pro_vision",
        "gemini_1_pro",
        "palm2_chat",
        "palm2_text",
        "claude_3_5_sonnet",
        "claude_3_opus",
        "claude_3_sonnet",
        "claude_3_haiku",
        "sea_lion_7b_instruct",
        "text_davinci_003",
        "text_davinci_002",
        "code_davinci_002",
        "text_curie_001",
        "text_babbage_001",
        "text_ada_001",
    ],
    typing.Any,
]
diff --git a/src/gooey/types/smart_gpt_page_response.py b/src/gooey/types/smart_gpt_page_response.py
new file mode 100644
index 0000000..9a316f8
--- /dev/null
+++ b/src/gooey/types/smart_gpt_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .smart_gpt_page_output import SmartGptPageOutput
+
+
class SmartGptPageResponse(pydantic_v1.BaseModel):
    """Completed-run response for the SmartGPT recipe: run identifiers plus the output."""

    id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    output: SmartGptPageOutput = pydantic_v1.Field()
    """
    Output of the run
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs override)."""
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict as the deep union of the exclude_unset and exclude_none renderings."""
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/smart_gpt_page_status_response.py b/src/gooey/types/smart_gpt_page_status_response.py
new file mode 100644
index 0000000..2fb6d3d
--- /dev/null
+++ b/src/gooey/types/smart_gpt_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .smart_gpt_page_output import SmartGptPageOutput
+
+
class SmartGptPageStatusResponse(pydantic_v1.BaseModel):
    """Polling response for a SmartGPT run: identifiers, current state,
    and — once the status is `"completed"` — the output.
    """

    run_id: str = pydantic_v1.Field()
    """
    Unique ID for this run
    """

    web_url: str = pydantic_v1.Field()
    """
    Web URL for this run
    """

    created_at: str = pydantic_v1.Field()
    """
    Time when the run was created as ISO format
    """

    run_time_sec: int = pydantic_v1.Field()
    """
    Total run time in seconds
    """

    status: RecipeRunState = pydantic_v1.Field()
    """
    Status of the run
    """

    detail: str = pydantic_v1.Field()
    """
    Details about the status of the run as a human readable string
    """

    output: typing.Optional[SmartGptPageOutput] = pydantic_v1.Field(default=None)
    """
    Output of the run. Only available if status is `"completed"`
    """

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs override)."""
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict as the deep union of the exclude_unset and exclude_none renderings."""
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/social_lookup_email_page_output.py b/src/gooey/types/social_lookup_email_page_output.py
new file mode 100644
index 0000000..8cae1ab
--- /dev/null
+++ b/src/gooey/types/social_lookup_email_page_output.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
class SocialLookupEmailPageOutput(pydantic_v1.BaseModel):
    """Output of an email social-lookup run: the person data found, the final
    prompt, and the generated texts.
    """

    person_data: typing.Dict[str, typing.Any]
    final_prompt: str
    output_text: typing.List[str]
    called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON; defaults to by-alias keys and skipping unset fields (caller kwargs override)."""
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict as the deep union of the exclude_unset and exclude_none renderings."""
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Frozen (immutable) instances; unknown fields are accepted and kept;
        # datetimes serialize via the shared ISO helper.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/social_lookup_email_page_request.py b/src/gooey/types/social_lookup_email_page_request.py
new file mode 100644
index 0000000..9cd236b
--- /dev/null
+++ b/src/gooey/types/social_lookup_email_page_request.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel
+
+
+class SocialLookupEmailPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ email_address: str
+ input_prompt: typing.Optional[str] = None
+ selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = None
+ num_outputs: typing.Optional[int] = None
+ avoid_repetition: typing.Optional[bool] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/social_lookup_email_page_request_selected_model.py b/src/gooey/types/social_lookup_email_page_request_selected_model.py
new file mode 100644
index 0000000..41c39fd
--- /dev/null
+++ b/src/gooey/types/social_lookup_email_page_request_selected_model.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SocialLookupEmailPageRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama3_8b",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_7b_it",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/social_lookup_email_page_response.py b/src/gooey/types/social_lookup_email_page_response.py
new file mode 100644
index 0000000..74222a1
--- /dev/null
+++ b/src/gooey/types/social_lookup_email_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .social_lookup_email_page_output import SocialLookupEmailPageOutput
+
+
+class SocialLookupEmailPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created, in ISO format
+ """
+
+ output: SocialLookupEmailPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/social_lookup_email_page_status_response.py b/src/gooey/types/social_lookup_email_page_status_response.py
new file mode 100644
index 0000000..cc684ad
--- /dev/null
+++ b/src/gooey/types/social_lookup_email_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .social_lookup_email_page_output import SocialLookupEmailPageOutput
+
+
+class SocialLookupEmailPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created, in ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+    Details about the status of the run as a human-readable string
+ """
+
+ output: typing.Optional[SocialLookupEmailPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/stream_error.py b/src/gooey/types/stream_error.py
new file mode 100644
index 0000000..68ec185
--- /dev/null
+++ b/src/gooey/types/stream_error.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class StreamError(pydantic_v1.BaseModel):
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the error
+ """
+
+ type: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ An error occurred. The stream has ended.
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/text2audio_page_output.py b/src/gooey/types/text2audio_page_output.py
new file mode 100644
index 0000000..1aae6cf
--- /dev/null
+++ b/src/gooey/types/text2audio_page_output.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class Text2AudioPageOutput(pydantic_v1.BaseModel):
+ output_audios: typing.Dict[str, typing.List[str]]
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/text2audio_page_request.py b/src/gooey/types/text2audio_page_request.py
new file mode 100644
index 0000000..ad99887
--- /dev/null
+++ b/src/gooey/types/text2audio_page_request.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+
+
+class Text2AudioPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ text_prompt: str
+ negative_prompt: typing.Optional[str] = None
+ duration_sec: typing.Optional[float] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ seed: typing.Optional[int] = None
+ sd2upscaling: typing.Optional[bool] = pydantic_v1.Field(alias="sd_2_upscaling", default=None)
+ selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/text2audio_page_response.py b/src/gooey/types/text2audio_page_response.py
new file mode 100644
index 0000000..4f2bbac
--- /dev/null
+++ b/src/gooey/types/text2audio_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .text2audio_page_output import Text2AudioPageOutput
+
+
+class Text2AudioPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created, in ISO format
+ """
+
+ output: Text2AudioPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/text2audio_page_status_response.py b/src/gooey/types/text2audio_page_status_response.py
new file mode 100644
index 0000000..f4f6719
--- /dev/null
+++ b/src/gooey/types/text2audio_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .text2audio_page_output import Text2AudioPageOutput
+
+
+class Text2AudioPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created, in ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+    Details about the status of the run as a human-readable string
+ """
+
+ output: typing.Optional[Text2AudioPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/text_to_speech_page_output.py b/src/gooey/types/text_to_speech_page_output.py
new file mode 100644
index 0000000..340183f
--- /dev/null
+++ b/src/gooey/types/text_to_speech_page_output.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class TextToSpeechPageOutput(pydantic_v1.BaseModel):
+ audio_url: str
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/text_to_speech_page_request.py b/src/gooey/types/text_to_speech_page_request.py
new file mode 100644
index 0000000..71578ae
--- /dev/null
+++ b/src/gooey/types/text_to_speech_page_request.py
@@ -0,0 +1,63 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
+from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
+from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
+
+
+class TextToSpeechPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ text_prompt: str
+ tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = None
+ uberduck_voice_name: typing.Optional[str] = None
+ uberduck_speaking_rate: typing.Optional[float] = None
+ google_voice_name: typing.Optional[str] = None
+ google_speaking_rate: typing.Optional[float] = None
+ google_pitch: typing.Optional[float] = None
+ bark_history_prompt: typing.Optional[str] = None
+ elevenlabs_voice_name: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Use `elevenlabs_voice_id` instead
+ """
+
+ elevenlabs_api_key: typing.Optional[str] = None
+ elevenlabs_voice_id: typing.Optional[str] = None
+ elevenlabs_model: typing.Optional[str] = None
+ elevenlabs_stability: typing.Optional[float] = None
+ elevenlabs_similarity_boost: typing.Optional[float] = None
+ elevenlabs_style: typing.Optional[float] = None
+ elevenlabs_speaker_boost: typing.Optional[bool] = None
+ azure_voice_name: typing.Optional[str] = None
+ openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = None
+ openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/text_to_speech_page_request_openai_tts_model.py b/src/gooey/types/text_to_speech_page_request_openai_tts_model.py
new file mode 100644
index 0000000..685dfff
--- /dev/null
+++ b/src/gooey/types/text_to_speech_page_request_openai_tts_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TextToSpeechPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/text_to_speech_page_request_openai_voice_name.py b/src/gooey/types/text_to_speech_page_request_openai_voice_name.py
new file mode 100644
index 0000000..efd862f
--- /dev/null
+++ b/src/gooey/types/text_to_speech_page_request_openai_voice_name.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TextToSpeechPageRequestOpenaiVoiceName = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/gooey/types/text_to_speech_page_request_tts_provider.py b/src/gooey/types/text_to_speech_page_request_tts_provider.py
new file mode 100644
index 0000000..a6b8938
--- /dev/null
+++ b/src/gooey/types/text_to_speech_page_request_tts_provider.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TextToSpeechPageRequestTtsProvider = typing.Union[
+ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
+]
diff --git a/src/gooey/types/text_to_speech_page_response.py b/src/gooey/types/text_to_speech_page_response.py
new file mode 100644
index 0000000..247c37d
--- /dev/null
+++ b/src/gooey/types/text_to_speech_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .text_to_speech_page_output import TextToSpeechPageOutput
+
+
+class TextToSpeechPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created, in ISO format
+ """
+
+ output: TextToSpeechPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/text_to_speech_page_status_response.py b/src/gooey/types/text_to_speech_page_status_response.py
new file mode 100644
index 0000000..0b046ed
--- /dev/null
+++ b/src/gooey/types/text_to_speech_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .text_to_speech_page_output import TextToSpeechPageOutput
+
+
+class TextToSpeechPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created, in ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+    Details about the status of the run as a human-readable string
+ """
+
+ output: typing.Optional[TextToSpeechPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/training_data_model.py b/src/gooey/types/training_data_model.py
new file mode 100644
index 0000000..16e3534
--- /dev/null
+++ b/src/gooey/types/training_data_model.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class TrainingDataModel(pydantic_v1.BaseModel):
+ prompt: str
+ completion: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/translation_page_output.py b/src/gooey/types/translation_page_output.py
new file mode 100644
index 0000000..c450115
--- /dev/null
+++ b/src/gooey/types/translation_page_output.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+
+
+class TranslationPageOutput(pydantic_v1.BaseModel):
+ output_texts: typing.Optional[typing.List[str]] = None
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/translation_page_request.py b/src/gooey/types/translation_page_request.py
new file mode 100644
index 0000000..8873433
--- /dev/null
+++ b/src/gooey/types/translation_page_request.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
+
+
+class TranslationPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ texts: typing.Optional[typing.List[str]] = None
+ selected_model: typing.Optional[TranslationPageRequestSelectedModel] = None
+ translation_source: typing.Optional[str] = None
+ translation_target: typing.Optional[str] = None
+ glossary_document: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/translation_page_request_selected_model.py b/src/gooey/types/translation_page_request_selected_model.py
new file mode 100644
index 0000000..62ae9ab
--- /dev/null
+++ b/src/gooey/types/translation_page_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TranslationPageRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/translation_page_response.py b/src/gooey/types/translation_page_response.py
new file mode 100644
index 0000000..ee6d54a
--- /dev/null
+++ b/src/gooey/types/translation_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .translation_page_output import TranslationPageOutput
+
+
+class TranslationPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+    Time when the run was created, in ISO format
+ """
+
+ output: TranslationPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/translation_page_status_response.py b/src/gooey/types/translation_page_status_response.py
new file mode 100644
index 0000000..b85e1dc
--- /dev/null
+++ b/src/gooey/types/translation_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .translation_page_output import TranslationPageOutput
+
+
+class TranslationPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run as a human readable string
+ """
+
+ output: typing.Optional[TranslationPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/trigger.py b/src/gooey/types/trigger.py
new file mode 100644
index 0000000..99821c7
--- /dev/null
+++ b/src/gooey/types/trigger.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+Trigger = typing.Union[typing.Literal["pre", "post"], typing.Any]
diff --git a/src/gooey/types/validation_error.py b/src/gooey/types/validation_error.py
new file mode 100644
index 0000000..ff2fcc0
--- /dev/null
+++ b/src/gooey/types/validation_error.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .validation_error_loc_item import ValidationErrorLocItem
+
+
+class ValidationError(pydantic_v1.BaseModel):
+ loc: typing.List[ValidationErrorLocItem]
+ msg: str
+ type: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/validation_error_loc_item.py b/src/gooey/types/validation_error_loc_item.py
new file mode 100644
index 0000000..9a0a83f
--- /dev/null
+++ b/src/gooey/types/validation_error_loc_item.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ValidationErrorLocItem = typing.Union[str, int]
diff --git a/src/gooey/types/vcard.py b/src/gooey/types/vcard.py
new file mode 100644
index 0000000..49079e0
--- /dev/null
+++ b/src/gooey/types/vcard.py
@@ -0,0 +1,54 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class Vcard(pydantic_v1.BaseModel):
+ format_name: str
+ email: typing.Optional[str] = None
+ gender: typing.Optional[str] = None
+ birthday_year: typing.Optional[int] = None
+ birthday_month: typing.Optional[int] = None
+ birthday_day: typing.Optional[int] = None
+ family_name: typing.Optional[str] = None
+ given_name: typing.Optional[str] = None
+ middle_names: typing.Optional[str] = None
+ honorific_prefixes: typing.Optional[str] = None
+ honorific_suffixes: typing.Optional[str] = None
+ impp: typing.Optional[str] = None
+ address: typing.Optional[str] = None
+ calendar_url: typing.Optional[str] = None
+ comma_separated_categories: typing.Optional[str] = None
+ kind: typing.Optional[str] = None
+ language: typing.Optional[str] = None
+ organization: typing.Optional[str] = None
+ photo_url: typing.Optional[str] = None
+ logo_url: typing.Optional[str] = None
+ role: typing.Optional[str] = None
+ timezone: typing.Optional[str] = None
+ job_title: typing.Optional[str] = None
+ urls: typing.Optional[typing.List[str]] = None
+ tel: typing.Optional[str] = None
+ note: typing.Optional[str] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/video_bots_page_output.py b/src/gooey/types/video_bots_page_output.py
new file mode 100644
index 0000000..c39d4e9
--- /dev/null
+++ b/src/gooey/types/video_bots_page_output.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .called_function_response import CalledFunctionResponse
+from .reply_button import ReplyButton
+from .search_reference import SearchReference
+from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery
+from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt
+
+
+class VideoBotsPageOutput(pydantic_v1.BaseModel):
+ final_prompt: typing.Optional[VideoBotsPageOutputFinalPrompt] = None
+ output_text: typing.Optional[typing.List[str]] = None
+ output_audio: typing.Optional[typing.List[str]] = None
+ output_video: typing.Optional[typing.List[str]] = None
+ raw_input_text: typing.Optional[str] = None
+ raw_tts_text: typing.Optional[typing.List[str]] = None
+ raw_output_text: typing.Optional[typing.List[str]] = None
+ references: typing.Optional[typing.List[SearchReference]] = None
+ final_search_query: typing.Optional[str] = None
+ final_keyword_query: typing.Optional[VideoBotsPageOutputFinalKeywordQuery] = None
+ output_documents: typing.Optional[typing.List[str]] = None
+ reply_buttons: typing.Optional[typing.List[ReplyButton]] = None
+ finish_reason: typing.Optional[typing.List[str]] = None
+ called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/video_bots_page_output_final_keyword_query.py b/src/gooey/types/video_bots_page_output_final_keyword_query.py
new file mode 100644
index 0000000..5fd5f17
--- /dev/null
+++ b/src/gooey/types/video_bots_page_output_final_keyword_query.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsPageOutputFinalKeywordQuery = typing.Union[str, typing.List[str]]
diff --git a/src/gooey/types/video_bots_page_output_final_prompt.py b/src/gooey/types/video_bots_page_output_final_prompt.py
new file mode 100644
index 0000000..59fcaab
--- /dev/null
+++ b/src/gooey/types/video_bots_page_output_final_prompt.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .conversation_entry import ConversationEntry
+
+VideoBotsPageOutputFinalPrompt = typing.Union[str, typing.List[ConversationEntry]]
diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py
new file mode 100644
index 0000000..40b3f56
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request.py
@@ -0,0 +1,147 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .conversation_entry import ConversationEntry
+from .llm_tools import LlmTools
+from .recipe_function import RecipeFunction
+from .run_settings import RunSettings
+from .sad_talker_settings import SadTalkerSettings
+from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
+from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
+from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
+from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
+from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
+from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel
+from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
+from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
+
+
+class VideoBotsPageRequest(pydantic_v1.BaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_prompt: typing.Optional[str] = None
+ input_audio: typing.Optional[str] = None
+ input_images: typing.Optional[typing.List[str]] = None
+ input_documents: typing.Optional[typing.List[str]] = None
+ doc_extract_url: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Select a workflow to extract text from documents and images.
+ """
+
+ messages: typing.Optional[typing.List[ConversationEntry]] = None
+ bot_script: typing.Optional[str] = None
+ selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = None
+ document_model: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+    When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+ """
+
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ task_instructions: typing.Optional[str] = None
+ query_instructions: typing.Optional[str] = None
+ keyword_instructions: typing.Optional[str] = None
+ documents: typing.Optional[typing.List[str]] = None
+ max_references: typing.Optional[int] = None
+ max_context_words: typing.Optional[int] = None
+ scroll_jump: typing.Optional[int] = None
+ embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = None
+ dense_weight: typing.Optional[float] = pydantic_v1.Field(default=None)
+ """
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+ """
+
+ citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None
+ use_url_shortener: typing.Optional[bool] = None
+ asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = pydantic_v1.Field(default=None)
+ """
+ Choose a model to transcribe incoming audio messages to text.
+ """
+
+ asr_language: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Choose a language to transcribe incoming audio messages to text.
+ """
+
+ translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = None
+ user_language: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+ """
+
+ input_glossary_document: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+    Translation Glossary for User Language -> LLM Language (English)
+ """
+
+ output_glossary_document: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+    Translation Glossary for LLM Language (English) -> User Language
+ """
+
+ lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = None
+ tools: typing.Optional[typing.List[LlmTools]] = pydantic_v1.Field(default=None)
+ """
+ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+ """
+
+ tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = None
+ uberduck_voice_name: typing.Optional[str] = None
+ uberduck_speaking_rate: typing.Optional[float] = None
+ google_voice_name: typing.Optional[str] = None
+ google_speaking_rate: typing.Optional[float] = None
+ google_pitch: typing.Optional[float] = None
+ bark_history_prompt: typing.Optional[str] = None
+ elevenlabs_voice_name: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Use `elevenlabs_voice_id` instead
+ """
+
+ elevenlabs_api_key: typing.Optional[str] = None
+ elevenlabs_voice_id: typing.Optional[str] = None
+ elevenlabs_model: typing.Optional[str] = None
+ elevenlabs_stability: typing.Optional[float] = None
+ elevenlabs_similarity_boost: typing.Optional[float] = None
+ elevenlabs_style: typing.Optional[float] = None
+ elevenlabs_speaker_boost: typing.Optional[bool] = None
+ azure_voice_name: typing.Optional[str] = None
+ openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = None
+ openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = None
+ input_face: typing.Optional[str] = None
+ face_padding_top: typing.Optional[int] = None
+ face_padding_bottom: typing.Optional[int] = None
+ face_padding_left: typing.Optional[int] = None
+ face_padding_right: typing.Optional[int] = None
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None
+ settings: typing.Optional[RunSettings] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/video_bots_page_request_asr_model.py b/src/gooey/types/video_bots_page_request_asr_model.py
new file mode 100644
index 0000000..fa50247
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_asr_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsPageRequestAsrModel = typing.Union[
+ typing.Literal[
+ "whisper_large_v2",
+ "whisper_large_v3",
+ "whisper_hindi_large_v2",
+ "whisper_telugu_large_v2",
+ "nemo_english",
+ "nemo_hindi",
+ "vakyansh_bhojpuri",
+ "gcp_v1",
+ "usm",
+ "deepgram",
+ "azure",
+ "seamless_m4t",
+ "mms_1b_all",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/video_bots_page_request_citation_style.py b/src/gooey/types/video_bots_page_request_citation_style.py
new file mode 100644
index 0000000..dc3630b
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_citation_style.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsPageRequestCitationStyle = typing.Union[
+ typing.Literal[
+ "number",
+ "title",
+ "url",
+ "symbol",
+ "markdown",
+ "html",
+ "slack_mrkdwn",
+ "plaintext",
+ "number_markdown",
+ "number_html",
+ "number_slack_mrkdwn",
+ "number_plaintext",
+ "symbol_markdown",
+ "symbol_html",
+ "symbol_slack_mrkdwn",
+ "symbol_plaintext",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/video_bots_page_request_embedding_model.py b/src/gooey/types/video_bots_page_request_embedding_model.py
new file mode 100644
index 0000000..19c8972
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_embedding_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsPageRequestEmbeddingModel = typing.Union[
+ typing.Literal[
+ "openai_3_large",
+ "openai_3_small",
+ "openai_ada_2",
+ "e5_large_v2",
+ "e5_base_v2",
+ "multilingual_e5_base",
+ "multilingual_e5_large",
+ "gte_large",
+ "gte_base",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/video_bots_page_request_lipsync_model.py b/src/gooey/types/video_bots_page_request_lipsync_model.py
new file mode 100644
index 0000000..3bb98e0
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_lipsync_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsPageRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_openai_tts_model.py b/src/gooey/types/video_bots_page_request_openai_tts_model.py
new file mode 100644
index 0000000..1df5de0
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_openai_tts_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_openai_voice_name.py b/src/gooey/types/video_bots_page_request_openai_voice_name.py
new file mode 100644
index 0000000..a08f96c
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_openai_voice_name.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsPageRequestOpenaiVoiceName = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/gooey/types/video_bots_page_request_selected_model.py b/src/gooey/types/video_bots_page_request_selected_model.py
new file mode 100644
index 0000000..8f0e1e1
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_selected_model.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsPageRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "gpt_4_o",
+ "gpt_4_turbo_vision",
+ "gpt_4_vision",
+ "gpt_4_turbo",
+ "gpt_4",
+ "gpt_4_32k",
+ "gpt_3_5_turbo",
+ "gpt_3_5_turbo_16k",
+ "gpt_3_5_turbo_instruct",
+ "llama3_70b",
+ "llama3_8b",
+ "llama2_70b_chat",
+ "mixtral_8x7b_instruct_0_1",
+ "gemma_7b_it",
+ "gemini_1_5_pro",
+ "gemini_1_pro_vision",
+ "gemini_1_pro",
+ "palm2_chat",
+ "palm2_text",
+ "claude_3_5_sonnet",
+ "claude_3_opus",
+ "claude_3_sonnet",
+ "claude_3_haiku",
+ "sea_lion_7b_instruct",
+ "text_davinci_003",
+ "text_davinci_002",
+ "code_davinci_002",
+ "text_curie_001",
+ "text_babbage_001",
+ "text_ada_001",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/video_bots_page_request_translation_model.py b/src/gooey/types/video_bots_page_request_translation_model.py
new file mode 100644
index 0000000..0373c0c
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_translation_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_tts_provider.py b/src/gooey/types/video_bots_page_request_tts_provider.py
new file mode 100644
index 0000000..3fc8d0a
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_tts_provider.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoBotsPageRequestTtsProvider = typing.Union[
+ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
+]
diff --git a/src/gooey/types/video_bots_page_response.py b/src/gooey/types/video_bots_page_response.py
new file mode 100644
index 0000000..14becc3
--- /dev/null
+++ b/src/gooey/types/video_bots_page_response.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .video_bots_page_output import VideoBotsPageOutput
+
+
+class VideoBotsPageResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ output: VideoBotsPageOutput = pydantic_v1.Field()
+ """
+ Output of the run
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/types/video_bots_page_status_response.py b/src/gooey/types/video_bots_page_status_response.py
new file mode 100644
index 0000000..328ed31
--- /dev/null
+++ b/src/gooey/types/video_bots_page_status_response.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .recipe_run_state import RecipeRunState
+from .video_bots_page_output import VideoBotsPageOutput
+
+
+class VideoBotsPageStatusResponse(pydantic_v1.BaseModel):
+ run_id: str = pydantic_v1.Field()
+ """
+ Unique ID for this run
+ """
+
+ web_url: str = pydantic_v1.Field()
+ """
+ Web URL for this run
+ """
+
+ created_at: str = pydantic_v1.Field()
+ """
+ Time when the run was created as ISO format
+ """
+
+ run_time_sec: int = pydantic_v1.Field()
+ """
+ Total run time in seconds
+ """
+
+ status: RecipeRunState = pydantic_v1.Field()
+ """
+ Status of the run
+ """
+
+ detail: str = pydantic_v1.Field()
+ """
+ Details about the status of the run as a human readable string
+ """
+
+ output: typing.Optional[VideoBotsPageOutput] = pydantic_v1.Field(default=None)
+ """
+ Output of the run. Only available if status is `"completed"`
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/gooey/version.py b/src/gooey/version.py
new file mode 100644
index 0000000..c6a685e
--- /dev/null
+++ b/src/gooey/version.py
@@ -0,0 +1,4 @@
+
+from importlib import metadata
+
+__version__ = metadata.version("gooey")
diff --git a/src/gooey/web_search_gpt3/__init__.py b/src/gooey/web_search_gpt3/__init__.py
new file mode 100644
index 0000000..f3ea265
--- /dev/null
+++ b/src/gooey/web_search_gpt3/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/gooey/web_search_gpt3/client.py b/src/gooey/web_search_gpt3/client.py
new file mode 100644
index 0000000..49a6581
--- /dev/null
+++ b/src/gooey/web_search_gpt3/client.py
@@ -0,0 +1,770 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..errors.internal_server_error import InternalServerError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.too_many_requests_error import TooManyRequestsError
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.async_api_response_model_v3 import AsyncApiResponseModelV3
+from ..types.failed_reponse_model_v2 import FailedReponseModelV2
+from ..types.generic_error_response import GenericErrorResponse
+from ..types.google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel
+from ..types.google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel
+from ..types.google_gpt_page_response import GoogleGptPageResponse
+from ..types.google_gpt_page_status_response import GoogleGptPageStatusResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.recipe_function import RecipeFunction
+from ..types.run_settings import RunSettings
+from ..types.serp_search_location import SerpSearchLocation
+from ..types.serp_search_type import SerpSearchType
+
# Sentinel default (`...`/Ellipsis) for optional parameters: it lets the client
# distinguish "argument not provided" from an explicit `None`. Each request
# passes `omit=OMIT` to the client wrapper so the request helpers can drop
# these entries from the outgoing payload (see core http_client).
OMIT = typing.cast(typing.Any, ...)
+
+
class WebSearchGpt3Client:
    """Synchronous client for the Web Search + GPT3 recipe endpoints.

    Exposes a blocking run (`google_gpt`), an asynchronous job submission
    (`async_google_gpt`), and a status poll for submitted jobs
    (`status_google_gpt`). All HTTP traffic goes through the shared
    `SyncClientWrapper` supplied by the top-level client.
    """

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def google_gpt(
        self,
        *,
        search_query: str,
        site_filter: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> GoogleGptPageResponse:
        """
        Run the Web Search + GPT3 recipe synchronously and return its result.

        Parameters
        ----------
        search_query : str

        site_filter : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[GoogleGptPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        max_search_urls : typing.Optional[int]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GoogleGptPageResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.web_search_gpt3.google_gpt(
            search_query="search_query",
            site_filter="site_filter",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v2/google-gpt/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # Map known API status codes to typed exceptions; any unexpected
        # status (or a non-JSON body) falls through to a generic ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(GoogleGptPageResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def async_google_gpt(
        self,
        *,
        search_query: str,
        site_filter: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Submit a Web Search + GPT3 run as a server-side async job; poll with
        `status_google_gpt` using the returned run id.

        Parameters
        ----------
        search_query : str

        site_filter : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[GoogleGptPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        max_search_urls : typing.Optional[int]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.web_search_gpt3.async_google_gpt(
            search_query="search_query",
            site_filter="site_filter",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v3/google-gpt/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # Map known API status codes to typed exceptions; any unexpected
        # status (or a non-JSON body) falls through to a generic ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def status_google_gpt(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> GoogleGptPageStatusResponse:
        """
        Poll the status of a previously submitted async Web Search + GPT3 run.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GoogleGptPageStatusResponse
            Successful Response

        Examples
        --------
        from gooey.client import Gooey

        client = Gooey(
            authorization="YOUR_AUTHORIZATION",
        )
        client.web_search_gpt3.status_google_gpt(
            run_id="run_id",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v3/google-gpt/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        # Map known API status codes to typed exceptions; any unexpected
        # status (or a non-JSON body) falls through to a generic ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(GoogleGptPageStatusResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
class AsyncWebSearchGpt3Client:
    """Asynchronous client for the Web Search + GPT3 recipe endpoints.

    Mirrors `WebSearchGpt3Client` with `async`/`await` semantics: a blocking
    run (`google_gpt`), an asynchronous job submission (`async_google_gpt`),
    and a status poll (`status_google_gpt`), all issued through the shared
    `AsyncClientWrapper`.
    """

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def google_gpt(
        self,
        *,
        search_query: str,
        site_filter: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> GoogleGptPageResponse:
        """
        Run the Web Search + GPT3 recipe and await its result.

        Parameters
        ----------
        search_query : str

        site_filter : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[GoogleGptPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        max_search_urls : typing.Optional[int]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GoogleGptPageResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.web_search_gpt3.google_gpt(
                search_query="search_query",
                site_filter="site_filter",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v2/google-gpt/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # Map known API status codes to typed exceptions; any unexpected
        # status (or a non-JSON body) falls through to a generic ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(GoogleGptPageResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    pydantic_v1.parse_obj_as(FailedReponseModelV2, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def async_google_gpt(
        self,
        *,
        search_query: str,
        site_filter: str,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncApiResponseModelV3:
        """
        Submit a Web Search + GPT3 run as a server-side async job; poll with
        `status_google_gpt` using the returned run id.

        Parameters
        ----------
        search_query : str

        site_filter : str

        functions : typing.Optional[typing.Sequence[RecipeFunction]]

        variables : typing.Optional[typing.Dict[str, typing.Any]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        serp_search_location : typing.Optional[SerpSearchLocation]

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        serp_search_type : typing.Optional[SerpSearchType]

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        task_instructions : typing.Optional[str]

        query_instructions : typing.Optional[str]

        selected_model : typing.Optional[GoogleGptPageRequestSelectedModel]

        avoid_repetition : typing.Optional[bool]

        num_outputs : typing.Optional[int]

        quality : typing.Optional[float]

        max_tokens : typing.Optional[int]

        sampling_temperature : typing.Optional[float]

        max_search_urls : typing.Optional[int]

        max_references : typing.Optional[int]

        max_context_words : typing.Optional[int]

        scroll_jump : typing.Optional[int]

        embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel]

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        settings : typing.Optional[RunSettings]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncApiResponseModelV3
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.web_search_gpt3.async_google_gpt(
                search_query="search_query",
                site_filter="site_filter",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/google-gpt/async/",
            method="POST",
            json={
                "functions": functions,
                "variables": variables,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,
        )
        # Map known API status codes to typed exceptions; any unexpected
        # status (or a non-JSON body) falls through to a generic ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(AsyncApiResponseModelV3, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def status_google_gpt(
        self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> GoogleGptPageStatusResponse:
        """
        Poll the status of a previously submitted async Web Search + GPT3 run.

        Parameters
        ----------
        run_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GoogleGptPageStatusResponse
            Successful Response

        Examples
        --------
        import asyncio

        from gooey.client import AsyncGooey

        client = AsyncGooey(
            authorization="YOUR_AUTHORIZATION",
        )


        async def main() -> None:
            await client.web_search_gpt3.status_google_gpt(
                run_id="run_id",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/google-gpt/status/", method="GET", params={"run_id": run_id}, request_options=request_options
        )
        # Map known API status codes to typed exceptions; any unexpected
        # status (or a non-JSON body) falls through to a generic ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(GoogleGptPageStatusResponse, _response.json())  # type: ignore
            if _response.status_code == 402:
                raise PaymentRequiredError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    pydantic_v1.parse_obj_as(GenericErrorResponse, _response.json())  # type: ignore
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/tests/custom/test_client.py b/tests/custom/test_client.py
new file mode 100644
index 0000000..60a58e6
--- /dev/null
+++ b/tests/custom/test_client.py
@@ -0,0 +1,6 @@
+import pytest
+
+# Get started with writing tests with pytest at https://docs.pytest.org
@pytest.mark.skip(reason="Unimplemented")
def test_client() -> None:
    """Placeholder test; remove the skip marker once real client tests exist."""
    # `assert True == True` is the E712 anti-pattern (comparison to True);
    # a bare truthy assert expresses the same placeholder intent.
    assert True
diff --git a/tests/utils/test_http_client.py b/tests/utils/test_http_client.py
new file mode 100644
index 0000000..b57dffd
--- /dev/null
+++ b/tests/utils/test_http_client.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from gooey.core.http_client import get_request_body
+from gooey.core.request_options import RequestOptions
+
+
def get_request_options() -> RequestOptions:
    """Shared fixture: request options carrying one extra body parameter."""
    extra_params = {"see you": "later"}
    return {"additional_body_parameters": extra_params}
+
+
def test_get_json_request_body() -> None:
    """A plain json payload passes through unchanged; additional_body_parameters
    from request options are merged into it."""
    body, files = get_request_body(json={"hello": "world"}, data=None, request_options=None, omit=None)
    assert files is None
    assert body == {"hello": "world"}

    merged_json, merged_data = get_request_body(
        json={"goodbye": "world"}, data=None, request_options=get_request_options(), omit=None
    )
    assert merged_data is None
    assert merged_json == {"goodbye": "world", "see you": "later"}
+
+
def test_get_files_request_body() -> None:
    """A form-data payload passes through unchanged; additional_body_parameters
    from request options are merged into the data body, not the json body."""
    body, files = get_request_body(json=None, data={"hello": "world"}, request_options=None, omit=None)
    assert body is None
    assert files == {"hello": "world"}

    merged_json, merged_data = get_request_body(
        json=None, data={"goodbye": "world"}, request_options=get_request_options(), omit=None
    )
    assert merged_json is None
    assert merged_data == {"goodbye": "world", "see you": "later"}
+
+
def test_get_none_request_body() -> None:
    """With no payload at all, both bodies are None; when request options carry
    additional_body_parameters, they become the json body on their own."""
    body, files = get_request_body(json=None, data=None, request_options=None, omit=None)
    assert body is None
    assert files is None

    merged_json, merged_data = get_request_body(
        json=None, data=None, request_options=get_request_options(), omit=None
    )
    assert merged_data is None
    assert merged_json == {"see you": "later"}
diff --git a/tests/utils/test_query_encoding.py b/tests/utils/test_query_encoding.py
new file mode 100644
index 0000000..43e9d34
--- /dev/null
+++ b/tests/utils/test_query_encoding.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from gooey.core.query_encoder import encode_query
+
+
def test_query_encoding() -> None:
    """encode_query flattens nested dicts into bracketed keys and leaves flat
    string-valued entries untouched."""
    # Flat entries are returned as-is (spaces in keys are not touched here).
    assert encode_query({"hello world": "hello world"}) == {"hello world": "hello world"}

    # One level of nesting becomes a single bracketed key.
    single_nested = {"hello_world": {"hello": "world"}}
    assert encode_query(single_nested) == {"hello_world[hello]": "world"}

    # Deeper nesting chains brackets; flat siblings are preserved alongside.
    deeply_nested = {"hello_world": {"hello": {"world": "today"}, "test": "this"}, "hi": "there"}
    expected = {
        "hello_world[hello][world]": "today",
        "hello_world[test]": "this",
        "hi": "there",
    }
    assert encode_query(deeply_nested) == expected